/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2013 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include "linux-fork.h"
#include "gdbthread.h"		/* for struct thread_info etc.  */
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include "gdb_stat.h"		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include "gdb_dirent.h"
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
#include "solib.h"
#include "linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "agent.h"
#include "tracepoint.h"
#include "exceptions.h"
#include "buffer.h"
#include "target-descriptions.h"
#include "filestuff.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */
/* This comment documents the high-level logic of this file.

   Waiting for events in sync mode
   ===============================

   When waiting for an event in a specific thread, we just use waitpid,
   passing the specific pid, and not passing WNOHANG.

   When waiting for an event in all threads, waitpid is not quite good.
   Prior to version 2.4, Linux can either wait for events in the main
   thread, or in secondary threads.  (2.4 has the __WALL flag).  So, if
   we use blocking waitpid, we might miss an event.  The solution is to
   use non-blocking waitpid, together with sigsuspend.  First, we use
   non-blocking waitpid to get an event in the main process, if any.
   Second, we use non-blocking waitpid with the __WCLONE flag to check
   for events in cloned processes.  If nothing is found, we use
   sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it means
   something happened to a child process -- and SIGCHLD will be
   delivered both for events in the main debugged process and in cloned
   processes.  As soon as we know there's an event, we get back to
   calling non-blocking waitpid with and without __WCLONE.

   Note that SIGCHLD should be blocked between waitpid and sigsuspend
   calls, so that we don't miss a signal.  If SIGCHLD arrives in
   between, when it's blocked, the signal becomes pending and
   sigsuspend immediately notices it and returns.
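
   For illustration only, the resulting wait loop is roughly the
   following sketch (assuming SUSPEND_MASK is the process mask with
   SIGCHLD unblocked):

     for (;;)
       {
         pid = waitpid (-1, &status, WNOHANG);
         if (pid <= 0)
           pid = waitpid (-1, &status, WNOHANG | __WCLONE);
         if (pid > 0)
           break;
         sigsuspend (&suspend_mask);
       }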

   Waiting for events in async mode
   ================================

   In async mode, GDB should always be ready to handle both user input
   and target events, so neither blocking waitpid nor sigsuspend are
   viable options.  Instead, we should asynchronously notify the GDB
   main event loop whenever there's an unprocessed event from the
   target.  We detect asynchronous target events by handling SIGCHLD
   signals.  To notify the event loop about target events, the
   self-pipe trick is used --- a pipe is registered as a waitable event
   source in the event loop, the event loop select/poll's on the read
   end of this pipe (as well as on other event sources, e.g., stdin),
   and the SIGCHLD handler writes a byte to this pipe.  This is more
   portable than relying on pselect/ppoll, since on kernels that lack
   those syscalls, libc emulates them with select/poll+sigprocmask,
   and that is racy (a.k.a. plain broken).
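
   For illustration only, the handler side of the self-pipe trick is
   just a single async-signal-safe write (the real handler is defined
   further down in this file and goes through async_file_mark):

     static void
     sigchld_handler (int signo)
     {
       /* Wake up the event loop; one byte is enough.  */
       write (linux_nat_event_pipe[1], "+", 1);
     }

   while the event loop side select/poll's on linux_nat_event_pipe[0]
   like on any other file descriptor.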

   Obviously, if we fail to notify the event loop if there's a target
   event, it's bad.  OTOH, if we notify the event loop when there's no
   event from the target, linux_nat_wait will detect that there's no
   real event to report, and return an event of type
   TARGET_WAITKIND_IGNORE.  This is mostly harmless, but it will waste
   time and is better avoided.

   The main design point is that every time GDB is outside linux-nat.c,
   we have a SIGCHLD handler installed that is called when something
   happens to the target and notifies the GDB event loop.  Whenever GDB
   core decides to handle the event, and calls into linux-nat.c, we
   process things as in sync mode, except that we never block in
   sigsuspend.

   While processing an event, we may end up momentarily blocked in
   waitpid calls.  Those waitpid calls, while blocking, are guaranteed
   to return quickly.  E.g., in all-stop mode, before reporting to the
   core that an LWP hit a breakpoint, all LWPs are stopped by sending
   them SIGSTOP, and synchronously waiting for the SIGSTOP to be
   reported.  Note that this is different from blocking indefinitely
   waiting for the next event --- here, we're already handling an
   event.

   Use of signals
   ==============

   We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of
   another signal is not entirely significant; we just need a signal to
   be delivered, so that we can intercept it.  SIGSTOP's advantage is
   that it can not be blocked.  A disadvantage is that it is not a
   real-time signal, so it can only be queued once; we do not keep
   track of other sources of SIGSTOP.

   Two other signals that can't be blocked are SIGCONT and SIGKILL.
   But we can't use them, because they have special behavior when the
   signal is generated - not when it is delivered.  SIGCONT resumes the
   entire thread group and SIGKILL kills the entire thread group.

   A delivered SIGSTOP would stop the entire thread group, not just the
   thread we tkill'd.  But we never let the SIGSTOP be delivered; we
   always intercept and cancel it (by PTRACE_CONT without passing
   SIGSTOP).
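
   For illustration only, stopping a single LWP as described above
   boils down to (kill_lwp is this file's tkill wrapper):

     kill_lwp (lwpid, SIGSTOP);
     /* ... wait for the stop to be reported, as described above ... */
     ptrace (PTRACE_CONT, lwpid, 0, 0);   /* intercept; don't deliver */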

   We could use a real-time signal instead.  This would solve those
   problems; we could use PTRACE_GETSIGINFO to locate the specific stop
   signals sent by GDB.  But we would still have to have some support
   for SIGSTOP, since PTRACE_ATTACH generates it, and there are races
   with trying to find a signal that is not blocked.  */

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)

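/* For example (illustration only), with PTRACE_O_TRACESYSGOOD in
   effect, a syscall stop is distinguished from a real SIGTRAP stop
   by checking the wait status like this:

     if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
       ... this is a syscall entry/exit stop, not a breakpoint ...  */
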
/* The single-threaded native GNU/Linux target_ops.  We save a pointer
   for the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* The method to call, if any, when a new fork is attached.  */
static linux_nat_new_fork_ftype *linux_nat_new_fork;

/* The method to call, if any, when a process is no longer
   attached.  */
static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (siginfo_t *,
                                       gdb_byte *,
                                       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
                                      enum target_object,
                                      const char *, gdb_byte *,
                                      const gdb_byte *,
                                      ULONGEST, LONGEST);

static unsigned int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
                    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACEFORK can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD can not be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also
   have PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Stores the currently used ptrace() options.  */
static int current_ptrace_options = 0;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we
     end up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

static void linux_nat_async (void (*callback)
                             (enum inferior_event_type event_type,
                              void *context),
                             void *context);
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
in_pid_list_p (struct simple_pid_list *list, int pid)
{
  struct simple_pid_list *p;

  for (p = list; p != NULL; p = p->next)
    if (p->pid == pid)
      return 1;
  return 0;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
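
/* Illustrative usage of the list above (not a verbatim excerpt): an
   early stop for a not-yet-known LWP is remembered, and consumed once
   the corresponding PTRACE_EVENT_CLONE is processed:

     add_to_pid_list (&stopped_pids, lwpid, status);
     ...
     if (pull_pid_from_list (&stopped_pids, lwpid, &status))
       ... STATUS is the remembered wait status ...  */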

\f
/* A helper function for linux_test_for_tracefork, called after
   fork ().  */

static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR.  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int ret;

  do
    {
      ret = waitpid (pid, statusp, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this
   fails, we know that the feature is not available.  This may change
   the tracing options for ORIGINAL_PID, but we'll be setting them
   shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown
   options.  We create a child process, attach to it, use
   PTRACE_SETOPTIONS to enable fork tracing, and let it fork.  If the
   process exits, we assume that we can't use TRACEFORK; if we get the
   fork notification, and we can extract the new child's PID, then we
   assume that we can.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    return;

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."),
           ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
           status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
        {
          warning (_("linux_test_for_tracefork: failed to kill child"));
          return;
        }

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
        warning (_("linux_test_for_tracefork: failed "
                   "to wait for killed child"));
      else if (!WIFSIGNALED (status))
        warning (_("linux_test_for_tracefork: unexpected "
                   "wait status 0x%x from killed child"), status);

      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
                PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
        {
          int second_status;

          linux_supports_tracefork_flag = 1;
          my_waitpid (second_pid, &second_status, 0);
          ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
          if (ret != 0)
            warning (_("linux_test_for_tracefork: "
                       "failed to kill second child"));
          my_waitpid (second_pid, &status, 0);
        }
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
               "(%d, status 0x%x)"), ret, status);

  do
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
        warning (_("linux_test_for_tracefork: failed to kill child"));
      my_waitpid (child_pid, &status, 0);
    }
  while (WIFSTOPPED (status));
}

/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.

   We try to enable syscall tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the
   tracing options for ORIGINAL_PID, but we'll be setting them shortly
   anyway.  */

static void
linux_test_for_tracesysgood (int original_pid)
{
  int ret;

  linux_supports_tracesysgood_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
  if (ret != 0)
    return;

  linux_supports_tracesysgood_flag = 1;
}

/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */

static int
linux_supports_tracesysgood (int pid)
{
  if (linux_supports_tracesysgood_flag == -1)
    linux_test_for_tracesysgood (pid);
  return linux_supports_tracesysgood_flag;
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}

static void
linux_enable_tracesysgood (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (linux_supports_tracesysgood (pid) == 0)
    return;

  current_ptrace_options |= PTRACE_O_TRACESYSGOOD;

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

\f
void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
    | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;

  if (linux_supports_tracevforkdone (pid))
    current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
     support read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  linux_enable_tracesysgood (pid_to_ptid (pid));
  linux_ptrace_init_warnings ();
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  linux_enable_tracesysgood (ptid);
  linux_ptrace_init_warnings ();
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Call delete_lwp with prototype compatible for make_cleanup.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = lp_voidp;

  delete_lwp (lp->ptid);
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  int has_vforked;
  int parent_pid, child_pid;

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (inferior_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (inferior_ptid);
  child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      return 1;
    }

  if (! follow_child)
    {
      struct lwp_info *child_lp = NULL;

      /* We're already attached to the parent, by default.  */

      /* Detach new forked process?  */
      if (detach_fork)
        {
          struct cleanup *old_chain;

          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep breakpoints list in sync.  */
              remove_breakpoints_pid (GET_PID (inferior_ptid));
            }

          if (info_verbose || debug_linux_nat)
            {
              target_terminal_ours ();
              fprintf_filtered (gdb_stdlog,
                                "Detaching after fork from "
                                "child process %d.\n",
                                child_pid);
            }

          old_chain = save_inferior_ptid ();
          inferior_ptid = ptid_build (child_pid, child_pid, 0);

          child_lp = add_lwp (inferior_ptid);
          child_lp->stopped = 1;
          child_lp->last_resume_kind = resume_stop;
          make_cleanup (delete_lwp_cleanup, child_lp);

          if (linux_nat_prepare_to_resume != NULL)
            linux_nat_prepare_to_resume (child_lp);
          ptrace (PTRACE_DETACH, child_pid, 0, 0);

          do_cleanups (old_chain);
        }
      else
        {
          struct inferior *parent_inf, *child_inf;
          struct cleanup *old_chain;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_pid);

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);
          child_inf->gdbarch = parent_inf->gdbarch;
          copy_inferior_target_desc_info (child_inf, parent_inf);

          old_chain = save_inferior_ptid ();
          save_current_program_space ();

          inferior_ptid = ptid_build (child_pid, child_pid, 0);
          add_thread (inferior_ptid);
          child_lp = add_lwp (inferior_ptid);
          child_lp->stopped = 1;
          child_lp->last_resume_kind = resume_stop;
          child_inf->symfile_flags = SYMFILE_NO_READ;

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = add_program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* Let the shared library layer (solib-svr4) learn about
                 this new process, relocate the cloned exec, pull in
                 shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              solib_create_inferior_hook (0);
            }

          /* Let the thread_db layer learn about this new process.  */
          check_for_thread_db ();

          do_cleanups (old_chain);
        }

      if (has_vforked)
        {
          struct lwp_info *parent_lp;
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;

          parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
          gdb_assert (linux_supports_tracefork_flag >= 0);

          if (linux_supports_tracevforkdone (0))
            {
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: waiting for VFORK_DONE on %d\n",
                                    parent_pid);
              parent_lp->stopped = 1;

              /* We'll handle the VFORK_DONE event like any other
                 event, in target_wait.  */
            }
          else
            {
              /* We can't insert breakpoints until the child has
                 finished with the shared memory region.  We need to
                 wait until that happens.  Ideal would be to just
                 call:
                 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
                 - waitpid (parent_pid, &status, __WALL);
                 However, most architectures can't handle a syscall
                 being traced on the way out if it wasn't traced on
                 the way in.

                 We might also think to loop, continuing the child
                 until it exits or gets a SIGTRAP.  One problem is
                 that the child might call ptrace with PTRACE_TRACEME.

                 There's no simple and reliable way to figure out when
                 the vforked child will be done with its copy of the
                 shared memory.  We could step it out of the syscall,
                 two instructions, let it go, and then single-step the
                 parent once.  When we have hardware single-step, this
                 would work; with software single-step it could still
                 be made to work but we'd have to be able to insert
                 single-step breakpoints in the child, and we'd have
                 to insert -just- the single-step breakpoint in the
                 parent.  Very awkward.

                 In the end, the best we can do is to make sure it
                 runs for a little while.  Hopefully it will be out of
                 range of any breakpoints we reinsert.  Usually this
                 is only the single-step breakpoint at vfork's return
                 point.  */

              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: no VFORK_DONE "
                                    "support, sleeping a bit\n");

              usleep (10000);

              /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
                 and leave it pending.  The next linux_nat_resume call
                 will notice a pending event, and bypass actually
                 resuming the inferior.  */
              parent_lp->status = 0;
              parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
              parent_lp->stopped = 1;

              /* If we're in async mode, need to tell the event loop
                 there's something here to process.  */
              if (target_can_async_p ())
                async_file_mark ();
            }
        }
    }
  else
    {
      struct inferior *parent_inf, *child_inf;
      struct lwp_info *child_lp;
      struct program_space *parent_pspace;

      if (info_verbose || debug_linux_nat)
        {
          target_terminal_ours ();
          if (has_vforked)
            fprintf_filtered (gdb_stdlog,
                              _("Attaching after process %d "
                                "vfork to child process %d.\n"),
                              parent_pid, child_pid);
          else
            fprintf_filtered (gdb_stdlog,
                              _("Attaching after process %d "
                                "fork to child process %d.\n"),
                              parent_pid, child_pid);
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_pid);

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
         child exits or execs.  At child exec or exit time we can
         remove the old breakpoints from the parent and detach or
         resume debugging it.  Otherwise, detach the parent now; we'll
         want to reuse its program/address spaces, but we can't set
         them to the child before removing breakpoints from the
         parent, otherwise, the breakpoints module could decide to
         remove breakpoints from the wrong process (since they'd be
         assigned to the same address space).  */

      if (has_vforked)
        {
          gdb_assert (child_inf->vfork_parent == NULL);
          gdb_assert (parent_inf->vfork_child == NULL);
          child_inf->vfork_parent = parent_inf;
          child_inf->pending_detach = 0;
          parent_inf->vfork_child = child_inf;
          parent_inf->pending_detach = detach_fork;
          parent_inf->waiting_for_vfork_done = 0;
        }
      else if (detach_fork)
        target_detach (NULL, 0);

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
         this new thread, before cloning the program space, and
         informing the solib layer about this new process.  */

      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      add_thread (inferior_ptid);
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = add_program_space (child_inf->aspace);
          child_inf->removable = 1;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (solib-svr4) learn about
             this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          solib_create_inferior_hook (0);
        }

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  return 0;
}

\f
static int
linux_child_insert_fork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_fork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_vfork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_exec_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_exec_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
                                    int table_size, int *table)
{
  if (!linux_supports_tracesysgood (pid))
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.  An illustrative sketch of
   such a raw clone call follows this comment.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     when debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */
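
/* For illustration only (not part of GDB): a raw clone-based thread
   as described above, created with CLONE_PTRACE so that the debugger
   gets to trace it.  STACK_SIZE and thread_func are hypothetical
   names; the stack grows downwards on most architectures, hence the
   stack top is passed:

     stack = xmalloc (STACK_SIZE);
     pid = clone (thread_func, stack + STACK_SIZE,
                  CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_PTRACE,
                  arg);  */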

/* List of known LWPs.  */
struct lwp_info *lwp_list;
\f

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized
   in _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}
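
/* Typical usage pattern of the pair above (illustration only):
   temporarily block SIGCHLD around a critical region, then restore
   the caller's mask:

     sigset_t prev_mask;

     block_child_signals (&prev_mask);
     ... do work that must not race with the SIGCHLD handler ...
     restore_child_signals_mask (&prev_mask);  */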

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
static void
linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);

      if (target_signo < numsigs && pass_signals[target_signo])
        sigaddset (&pass_mask, signo);
    }
}

\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);

\f
/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    {
      if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
        snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
                  strsignal (SIGTRAP));
      else
        snprintf (buf, sizeof (buf), "%s (stopped)",
                  strsignal (WSTOPSIG (status)));
    }
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
              strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Destroy and free LP.  */

static void
lwp_free (struct lwp_info *lp)
{
  xfree (lp->arch_private);
  xfree (lp);
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
        {
          if (lp == lwp_list)
            lwp_list = lp->next;
          else
            lpprev->next = lp->next;

          lwp_free (lp);
        }
      else
        lpprev = lp;
    }
}

/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->last_resume_kind = resume_continue;
  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  return lp;
}

/* Add the LWP specified by PTID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be
   stopped.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  lp = add_initial_lwp (ptid);

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  We don't do this for the first
     thread though.  See add_initial_lwp.  */
  if (linux_nat_new_thread != NULL)
    linux_nat_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PTID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  lwp_free (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PTID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   int (*callback) (struct lwp_info *, void *),
                   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
        {
          if ((*callback) (lp, data))
            return lp;
        }
    }

  return NULL;
}
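
/* For example (illustration only, taken from the detach path below),
   stopping every LWP of process PID uses a callback that returns 0 so
   that the iteration visits all of them:

     iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);

   while a callback that returns 1 makes iterate_over_lwps return the
   matching LWP immediately.  */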

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (GET_PID (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
        printf_unfiltered (_("[%s exited]\n"),
                           target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
                            int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
        warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
                            pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Received %s after attaching\n",
                            status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PTID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwpid;

  gdb_assert (is_lwp (ptid));

  lp = find_lwp_pid (ptid);
  lwpid = GET_LWP (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all
     threads, this assumption might be wrong; fortunately, this is very
     unlikely to happen.  */
  if (lwpid != GET_PID (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
        {
          if (linux_supports_tracefork_flag)
            {
              /* If we haven't stopped all threads when we get here,
                 we may have seen a thread listed in thread_db's list,
                 but not processed the PTRACE_EVENT_CLONE yet.  If
                 that's the case, ignore this new thread, and let
                 normal event handling discover it later.  */
              if (in_pid_list_p (stopped_pids, lwpid))
                {
                  /* We've already seen this thread stop, but we
                     haven't seen the PTRACE_EVENT_CLONE extended
                     event yet.  */
                  return 0;
                }
              else
                {
                  int new_pid;
                  int status;

                  /* See if we've got a stop for this new child
                     pending.  If so, we're already attached.  */
                  new_pid = my_waitpid (lwpid, &status, WNOHANG);
                  if (new_pid == -1 && errno == ECHILD)
                    new_pid = my_waitpid (lwpid, &status,
                                          __WCLONE | WNOHANG);
                  if (new_pid != -1)
                    {
                      if (WIFSTOPPED (status))
                        add_to_pid_list (&stopped_pids, lwpid, status);
                      return 1;
                    }
                }
            }

          /* If we fail to attach to the thread, issue a warning,
             but continue.  One way this can happen is if thread
             creation is interrupted; as of Linux kernel 2.6.19, a
             bug may place threads in the thread list and then fail
             to create them.  */
          warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
                   safe_strerror (errno));
          return -1;
        }

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
                            target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
        return 1;

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
        {
          lp->resumed = 1;
          lp->status = status;
        }

      target_post_attach (GET_LWP (lp->ptid));

      if (debug_linux_nat)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "LLAL: waitpid %s received %s\n",
                              target_pid_to_str (ptid),
                              status_to_str (status));
        }
    }
  else
    {
      /* We assume that the LWP representing the original process is
         already stopped.  Mark it as stopped in the data structure
         that the GNU/Linux ptrace layer uses to keep track of
         threads.  Note that this won't have already been done since
         the main thread will have, we assume, been stopped by an
         attach from a different layer.  */
      if (lp == NULL)
        lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  lp->last_resume_kind = resume_stop;
  return 0;
}

static void
linux_nat_create_inferior (struct target_ops *ops,
                           char *exec_file, char *allargs, char **env,
                           int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning (_("Error disabling address space randomization: %s"),
                 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning (_("Error restoring address space randomization: %s"),
                 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}

static void
linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;
  volatile struct gdb_exception ex;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (0, NULL);

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      linux_ops->to_attach (ops, args, from_tty);
    }
  if (ex.reason < 0)
    {
      pid_t pid = parse_pid_to_attach (args);
      struct buffer buffer;
      char *message, *buffer_s;

      message = xstrdup (ex.message);
      make_cleanup (xfree, message);

      buffer_init (&buffer);
      linux_ptrace_attach_warnings (pid, &buffer);

      buffer_grow_str0 (&buffer, "");
      buffer_s = buffer_finish (&buffer);
      make_cleanup (xfree, buffer_s);

      throw_error (ex.error, "%s%s", buffer_s, message);
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
                                       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
        {
          int exit_code = WEXITSTATUS (status);

          target_terminal_ours ();
          target_mourn_inferior ();
          if (exit_code == 0)
            error (_("Unable to attach: program exited normally."));
          else
            error (_("Unable to attach: program exited with code %d."),
                   exit_code);
        }
      else if (WIFSIGNALED (status))
        {
          enum gdb_signal signo;

          target_terminal_ours ();
          target_mourn_inferior ();

          signo = gdb_signal_from_host (WTERMSIG (status));
          error (_("Unable to attach: program terminated with signal "
                   "%s, %s."),
                 gdb_signal_to_name (signo),
                 gdb_signal_to_string (signo));
        }

      internal_error (__FILE__, __LINE__,
                      _("unexpected status %d for PID %ld"),
                      status, (long) GET_LWP (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "LNA: waitpid %ld, saving status %s\n",
                        (long) GET_PID (lp->ptid), status_to_str (status));

  lp->status = status;

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

/* Get pending status of LP.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_TRAP when the catchpoint triggers.  But, those
     traps are debug API (ptrace in our case) related and induced; the
     inferior wouldn't see them if it wasn't being traced.  Hence, we
     should never pass them to the inferior, even when set to pass
     state.  Since this corner case isn't handled by infrun.c when
     proceeding with a signal, for consistency, neither do we handle
     it here (or elsewhere in the file we check for signal pass
     state).  Normally SIGTRAP isn't set to pass state, so this is
     really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0;  /* A pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
        {
          struct thread_info *tp = find_thread_ptid (lp->ptid);

          signo = tp->suspend.stop_signal;
        }
    }

  *status = 0;

  if (signo == GDB_SIGNAL_0)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "GPT: lwp %s has no pending signal\n",
                            target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "GPT: lwp %s had signal %s, "
                            "but it is in no pass state\n",
                            target_pid_to_str (lp->ptid),
                            gdb_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (gdb_signal_to_host (signo));

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "GPT: lwp %s has pending signal %s\n",
                            target_pid_to_str (lp->ptid),
                            gdb_signal_to_string (signo));
    }

  return 0;
}

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
                        strsignal (WSTOPSIG (lp->status)),
                        target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "DC: Sending SIGCONT to %s\n",
                            target_pid_to_str (lp->ptid));

      kill_lwp (GET_LWP (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      if (linux_nat_prepare_to_resume != NULL)
        linux_nat_prepare_to_resume (lp);
      errno = 0;
      if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
                  WSTOPSIG (status)) < 0)
        error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
               safe_strerror (errno));

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "PTRACE_DETACH (%s, %s, 0) (OK)\n",
                            target_pid_to_str (lp->ptid),
                            strsignal (WSTOPSIG (status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}
1793
1794 static void
1795 linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
1796 {
1797 int pid;
1798 int status;
1799 struct lwp_info *main_lwp;
1800
1801 pid = GET_PID (inferior_ptid);
1802
1803 /* Don't unregister from the event loop, as there may be other
1804 inferiors running. */
1805
1806 /* Stop all threads before detaching. ptrace requires that the
1807      thread is stopped to successfully detach.  */
1808 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1809 /* ... and wait until all of them have reported back that
1810 they're no longer running. */
1811 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1812
1813 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1814
1815 /* Only the initial process should be left right now. */
1816 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1817
1818 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1819
1820 /* Pass on any pending signal for the last LWP. */
1821 if ((args == NULL || *args == '\0')
1822 && get_pending_status (main_lwp, &status) != -1
1823 && WIFSTOPPED (status))
1824 {
1825 /* Put the signal number in ARGS so that inf_ptrace_detach will
1826 pass it along with PTRACE_DETACH. */
1827 args = alloca (8);
1828 sprintf (args, "%d", (int) WSTOPSIG (status));
1829 if (debug_linux_nat)
1830 fprintf_unfiltered (gdb_stdlog,
1831 "LND: Sending signal %s to %s\n",
1832 args,
1833 target_pid_to_str (main_lwp->ptid));
1834 }
1835
1836 if (linux_nat_prepare_to_resume != NULL)
1837 linux_nat_prepare_to_resume (main_lwp);
1838 delete_lwp (main_lwp->ptid);
1839
1840 if (forks_exist_p ())
1841 {
1842 /* Multi-fork case. The current inferior_ptid is being detached
1843 from, but there are other viable forks to debug. Detach from
1844 the current fork, and context-switch to the first
1845 available. */
1846 linux_fork_detach (args, from_tty);
1847 }
1848 else
1849 linux_ops->to_detach (ops, args, from_tty);
1850 }
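
/* Note that the detach signal travels as text here: the sprintf above
   stores e.g. "2" in ARGS, and the inf-ptrace layer parses it back
   into a number for PTRACE_DETACH.  The 8-byte alloca comfortably
   holds any signal number.  */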
1851
1852 /* Resume LP. */
1853
1854 static void
1855 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1856 {
1857 if (lp->stopped)
1858 {
1859 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1860
1861 if (inf->vfork_child != NULL)
1862 {
1863 if (debug_linux_nat)
1864 fprintf_unfiltered (gdb_stdlog,
1865 "RC: Not resuming %s (vfork parent)\n",
1866 target_pid_to_str (lp->ptid));
1867 }
1868 else if (lp->status == 0
1869 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1870 {
1871 if (debug_linux_nat)
1872 fprintf_unfiltered (gdb_stdlog,
1873 "RC: Resuming sibling %s, %s, %s\n",
1874 target_pid_to_str (lp->ptid),
1875 (signo != GDB_SIGNAL_0
1876 ? strsignal (gdb_signal_to_host (signo))
1877 : "0"),
1878 step ? "step" : "resume");
1879
1880 if (linux_nat_prepare_to_resume != NULL)
1881 linux_nat_prepare_to_resume (lp);
1882 linux_ops->to_resume (linux_ops,
1883 pid_to_ptid (GET_LWP (lp->ptid)),
1884 step, signo);
1885 lp->stopped = 0;
1886 lp->step = step;
1887 lp->stopped_by_watchpoint = 0;
1888 }
1889 else
1890 {
1891 if (debug_linux_nat)
1892 fprintf_unfiltered (gdb_stdlog,
1893 "RC: Not resuming sibling %s (has pending)\n",
1894 target_pid_to_str (lp->ptid));
1895 }
1896 }
1897 else
1898 {
1899 if (debug_linux_nat)
1900 fprintf_unfiltered (gdb_stdlog,
1901 "RC: Not resuming sibling %s (not stopped)\n",
1902 target_pid_to_str (lp->ptid));
1903 }
1904 }
1905
1906 /* Resume LWP, with the last stop signal, if it is in pass state. */
1907
1908 static int
1909 linux_nat_resume_callback (struct lwp_info *lp, void *data)
1910 {
1911 enum gdb_signal signo = GDB_SIGNAL_0;
1912
1913 if (lp->stopped)
1914 {
1915 struct thread_info *thread;
1916
1917 thread = find_thread_ptid (lp->ptid);
1918 if (thread != NULL)
1919 {
1920 if (signal_pass_state (thread->suspend.stop_signal))
1921 signo = thread->suspend.stop_signal;
1922 thread->suspend.stop_signal = GDB_SIGNAL_0;
1923 }
1924 }
1925
1926 resume_lwp (lp, 0, signo);
1927 return 0;
1928 }
1929
1930 static int
1931 resume_clear_callback (struct lwp_info *lp, void *data)
1932 {
1933 lp->resumed = 0;
1934 lp->last_resume_kind = resume_stop;
1935 return 0;
1936 }
1937
1938 static int
1939 resume_set_callback (struct lwp_info *lp, void *data)
1940 {
1941 lp->resumed = 1;
1942 lp->last_resume_kind = resume_continue;
1943 return 0;
1944 }
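
/* The iterate_over_lwps callbacks in this file share one contract:
   return 0 to keep iterating over the remaining LWPs, or non-zero to
   stop early and have iterate_over_lwps return the matching LWP.  As
   an illustration only (this helper is hypothetical, not part of
   GDB), a callback selecting the first stepping LWP would be:

     static int
     first_stepping_callback (struct lwp_info *lp, void *data)
     {
       return lp->step != 0;
     }

   used as iterate_over_lwps (minus_one_ptid, first_stepping_callback,
   NULL).  */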
1945
1946 static void
1947 linux_nat_resume (struct target_ops *ops,
1948 ptid_t ptid, int step, enum gdb_signal signo)
1949 {
1950 struct lwp_info *lp;
1951 int resume_many;
1952
1953 if (debug_linux_nat)
1954 fprintf_unfiltered (gdb_stdlog,
1955 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1956 step ? "step" : "resume",
1957 target_pid_to_str (ptid),
1958 (signo != GDB_SIGNAL_0
1959 ? strsignal (gdb_signal_to_host (signo)) : "0"),
1960 target_pid_to_str (inferior_ptid));
1961
1962 /* A specific PTID means `step only this process id'. */
1963 resume_many = (ptid_equal (minus_one_ptid, ptid)
1964 || ptid_is_pid (ptid));
1965
1966 /* Mark the lwps we're resuming as resumed. */
1967 iterate_over_lwps (ptid, resume_set_callback, NULL);
1968
1969 /* See if it's the current inferior that should be handled
1970 specially. */
1971 if (resume_many)
1972 lp = find_lwp_pid (inferior_ptid);
1973 else
1974 lp = find_lwp_pid (ptid);
1975 gdb_assert (lp != NULL);
1976
1977 /* Remember if we're stepping. */
1978 lp->step = step;
1979 lp->last_resume_kind = step ? resume_step : resume_continue;
1980
1981 /* If we have a pending wait status for this thread, there is no
1982 point in resuming the process. But first make sure that
1983 linux_nat_wait won't preemptively handle the event - we
1984 should never take this short-circuit if we are going to
1985 leave LP running, since we have skipped resuming all the
1986 other threads. This bit of code needs to be synchronized
1987 with linux_nat_wait. */
1988
1989 if (lp->status && WIFSTOPPED (lp->status))
1990 {
1991 if (!lp->step
1992 && WSTOPSIG (lp->status)
1993 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
1994 {
1995 if (debug_linux_nat)
1996 fprintf_unfiltered (gdb_stdlog,
1997 "LLR: Not short circuiting for ignored "
1998 "status 0x%x\n", lp->status);
1999
2000 /* FIXME: What should we do if we are supposed to continue
2001 this thread with a signal? */
2002 gdb_assert (signo == GDB_SIGNAL_0);
2003 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
2004 lp->status = 0;
2005 }
2006 }
2007
2008 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2009 {
2010 /* FIXME: What should we do if we are supposed to continue
2011 this thread with a signal? */
2012 gdb_assert (signo == GDB_SIGNAL_0);
2013
2014 if (debug_linux_nat)
2015 fprintf_unfiltered (gdb_stdlog,
2016 "LLR: Short circuiting for status 0x%x\n",
2017 lp->status);
2018
2019 if (target_can_async_p ())
2020 {
2021 target_async (inferior_event_handler, 0);
2022 /* Tell the event loop we have something to process. */
2023 async_file_mark ();
2024 }
2025 return;
2026 }
2027
2028 /* Mark LWP as not stopped to prevent it from being continued by
2029 linux_nat_resume_callback. */
2030 lp->stopped = 0;
2031
2032 if (resume_many)
2033 iterate_over_lwps (ptid, linux_nat_resume_callback, NULL);
2034
2035 /* Convert to something the lower layer understands. */
2036 ptid = pid_to_ptid (GET_LWP (lp->ptid));
2037
2038 if (linux_nat_prepare_to_resume != NULL)
2039 linux_nat_prepare_to_resume (lp);
2040 linux_ops->to_resume (linux_ops, ptid, step, signo);
2041 lp->stopped_by_watchpoint = 0;
2042
2043 if (debug_linux_nat)
2044 fprintf_unfiltered (gdb_stdlog,
2045 "LLR: %s %s, %s (resume event thread)\n",
2046 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2047 target_pid_to_str (ptid),
2048 (signo != GDB_SIGNAL_0
2049 ? strsignal (gdb_signal_to_host (signo)) : "0"));
2050
2051 if (target_can_async_p ())
2052 target_async (inferior_event_handler, 0);
2053 }
2054
2055 /* Send a signal to an LWP. */
2056
2057 static int
2058 kill_lwp (int lwpid, int signo)
2059 {
2060 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2061 fails, then we are not using nptl threads and we should be using kill. */
2062
2063 #ifdef HAVE_TKILL_SYSCALL
2064 {
2065 static int tkill_failed;
2066
2067 if (!tkill_failed)
2068 {
2069 int ret;
2070
2071 errno = 0;
2072 ret = syscall (__NR_tkill, lwpid, signo);
2073 if (errno != ENOSYS)
2074 return ret;
2075 tkill_failed = 1;
2076 }
2077 }
2078 #endif
2079
2080 return kill (lwpid, signo);
2081 }
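
/* Unlike kill, which addresses a whole process (and under NPTL may be
   handled by any thread in the group), tkill addresses one specific
   kernel thread by its tid.  The ENOSYS check above latches
   tkill_failed once, so on kernels lacking the tkill syscall we pay
   for the failed syscall only a single time before permanently
   falling back to kill.  */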
2082
2083 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2084 event, check if the core is interested in it: if not, ignore the
2085 event, and keep waiting; otherwise, we need to toggle the LWP's
2086 syscall entry/exit status, since the ptrace event itself doesn't
2087 indicate it, and report the trap to higher layers. */
2088
2089 static int
2090 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2091 {
2092 struct target_waitstatus *ourstatus = &lp->waitstatus;
2093 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2094 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2095
2096 if (stopping)
2097 {
2098 /* If we're stopping threads, there's a SIGSTOP pending, which
2099 makes it so that the LWP reports an immediate syscall return,
2100 followed by the SIGSTOP. Skip seeing that "return" using
2101 PTRACE_CONT directly, and let stop_wait_callback collect the
2102          SIGSTOP.  Later, when the thread is resumed, a new syscall
2103          entry event is reported.  If we didn't do this (and returned 0), we'd
2104 leave a syscall entry pending, and our caller, by using
2105 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2106 itself. Later, when the user re-resumes this LWP, we'd see
2107 another syscall entry event and we'd mistake it for a return.
2108
2109 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2110 (leaving immediately with LWP->signalled set, without issuing
2111 a PTRACE_CONT), it would still be problematic to leave this
2112 syscall enter pending, as later when the thread is resumed,
2113 it would then see the same syscall exit mentioned above,
2114 followed by the delayed SIGSTOP, while the syscall didn't
2115 actually get to execute. It seems it would be even more
2116 confusing to the user. */
2117
2118 if (debug_linux_nat)
2119 fprintf_unfiltered (gdb_stdlog,
2120 "LHST: ignoring syscall %d "
2121 "for LWP %ld (stopping threads), "
2122 "resuming with PTRACE_CONT for SIGSTOP\n",
2123 syscall_number,
2124 GET_LWP (lp->ptid));
2125
2126 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2127 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2128 return 1;
2129 }
2130
2131 if (catch_syscall_enabled ())
2132 {
2133 /* Always update the entry/return state, even if this particular
2134 syscall isn't interesting to the core now. In async mode,
2135 the user could install a new catchpoint for this syscall
2136 between syscall enter/return, and we'll need to know to
2137 report a syscall return if that happens. */
2138 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2139 ? TARGET_WAITKIND_SYSCALL_RETURN
2140 : TARGET_WAITKIND_SYSCALL_ENTRY);
2141
2142 if (catching_syscall_number (syscall_number))
2143 {
2144 /* Alright, an event to report. */
2145 ourstatus->kind = lp->syscall_state;
2146 ourstatus->value.syscall_number = syscall_number;
2147
2148 if (debug_linux_nat)
2149 fprintf_unfiltered (gdb_stdlog,
2150 "LHST: stopping for %s of syscall %d"
2151 " for LWP %ld\n",
2152 lp->syscall_state
2153 == TARGET_WAITKIND_SYSCALL_ENTRY
2154 ? "entry" : "return",
2155 syscall_number,
2156 GET_LWP (lp->ptid));
2157 return 0;
2158 }
2159
2160 if (debug_linux_nat)
2161 fprintf_unfiltered (gdb_stdlog,
2162 "LHST: ignoring %s of syscall %d "
2163 "for LWP %ld\n",
2164 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2165 ? "entry" : "return",
2166 syscall_number,
2167 GET_LWP (lp->ptid));
2168 }
2169 else
2170 {
2171 /* If we had been syscall tracing, and hence used PT_SYSCALL
2172 before on this LWP, it could happen that the user removes all
2173 syscall catchpoints before we get to process this event.
2174 There are two noteworthy issues here:
2175
2176 - When stopped at a syscall entry event, resuming with
2177 PT_STEP still resumes executing the syscall and reports a
2178 syscall return.
2179
2180          - Only PT_SYSCALL catches syscall enters.  So if we last
2181          single-stepped this thread, then this event can't be a
2182          syscall enter; by elimination, it has to be a syscall
2183          exit.
2184
2185 The points above mean that the next resume, be it PT_STEP or
2186          PT_CONTINUE, cannot trigger a syscall trace event.  */
2187 if (debug_linux_nat)
2188 fprintf_unfiltered (gdb_stdlog,
2189 "LHST: caught syscall event "
2190 "with no syscall catchpoints."
2191 " %d for LWP %ld, ignoring\n",
2192 syscall_number,
2193 GET_LWP (lp->ptid));
2194 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2195 }
2196
2197 /* The core isn't interested in this event. For efficiency, avoid
2198 stopping all threads only to have the core resume them all again.
2199 Since we're not stopping threads, if we're still syscall tracing
2200 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2201 subsequent syscall. Simply resume using the inf-ptrace layer,
2202 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2203
2204 /* Note that gdbarch_get_syscall_number may access registers, hence
2205 fill a regcache. */
2206 registers_changed ();
2207 if (linux_nat_prepare_to_resume != NULL)
2208 linux_nat_prepare_to_resume (lp);
2209 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2210 lp->step, GDB_SIGNAL_0);
2211 return 1;
2212 }
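
/* The entry/return toggle above assumes PTRACE_SYSCALL stops arrive
   strictly in pairs for a given LWP:

     first stop:  lp->syscall_state becomes TARGET_WAITKIND_SYSCALL_ENTRY
     second stop: lp->syscall_state becomes TARGET_WAITKIND_SYSCALL_RETURN

   Whenever that pairing is deliberately broken (the "stopping" path
   and the no-catchpoint path above), syscall_state is reset to
   TARGET_WAITKIND_IGNORE, so the next syscall stop is treated as an
   entry again.  */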
2213
2214 /* Handle a GNU/Linux extended wait response. If we see a clone
2215 event, we need to add the new LWP to our list (and not report the
2216 trap to higher layers). This function returns non-zero if the
2217 event should be ignored and we should wait again. If STOPPING is
2218 true, the new LWP remains stopped, otherwise it is continued. */
2219
2220 static int
2221 linux_handle_extended_wait (struct lwp_info *lp, int status,
2222 int stopping)
2223 {
2224 int pid = GET_LWP (lp->ptid);
2225 struct target_waitstatus *ourstatus = &lp->waitstatus;
2226 int event = status >> 16;
2227
2228 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2229 || event == PTRACE_EVENT_CLONE)
2230 {
2231 unsigned long new_pid;
2232 int ret;
2233
2234 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
2235
2236 /* If we haven't already seen the new PID stop, wait for it now. */
2237 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2238 {
2239 /* The new child has a pending SIGSTOP. We can't affect it until it
2240 hits the SIGSTOP, but we're already attached. */
2241 ret = my_waitpid (new_pid, &status,
2242 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2243 if (ret == -1)
2244 perror_with_name (_("waiting for new child"));
2245 else if (ret != new_pid)
2246 internal_error (__FILE__, __LINE__,
2247 _("wait returned unexpected PID %d"), ret);
2248 else if (!WIFSTOPPED (status))
2249 internal_error (__FILE__, __LINE__,
2250 _("wait returned unexpected status 0x%x"), status);
2251 }
2252
2253 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
2254
2255 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
2256 {
2257 /* The arch-specific native code may need to know about new
2258 forks even if those end up never mapped to an
2259 inferior. */
2260 if (linux_nat_new_fork != NULL)
2261 linux_nat_new_fork (lp, new_pid);
2262 }
2263
2264 if (event == PTRACE_EVENT_FORK
2265 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2266 {
2267 /* Handle checkpointing by linux-fork.c here as a special
2268 case. We don't want the follow-fork-mode or 'catch fork'
2269 to interfere with this. */
2270
2271 /* This won't actually modify the breakpoint list, but will
2272 physically remove the breakpoints from the child. */
2273 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
2274
2275 /* Retain child fork in ptrace (stopped) state. */
2276 if (!find_fork_pid (new_pid))
2277 add_fork (new_pid);
2278
2279 /* Report as spurious, so that infrun doesn't want to follow
2280 this fork. We're actually doing an infcall in
2281 linux-fork.c. */
2282 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2283
2284 /* Report the stop to the core. */
2285 return 0;
2286 }
2287
2288 if (event == PTRACE_EVENT_FORK)
2289 ourstatus->kind = TARGET_WAITKIND_FORKED;
2290 else if (event == PTRACE_EVENT_VFORK)
2291 ourstatus->kind = TARGET_WAITKIND_VFORKED;
2292 else
2293 {
2294 struct lwp_info *new_lp;
2295
2296 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2297
2298 if (debug_linux_nat)
2299 fprintf_unfiltered (gdb_stdlog,
2300 "LHEW: Got clone event "
2301 "from LWP %d, new child is LWP %ld\n",
2302 pid, new_pid);
2303
2304 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
2305 new_lp->cloned = 1;
2306 new_lp->stopped = 1;
2307
2308 if (WSTOPSIG (status) != SIGSTOP)
2309 {
2310               /* This can happen if someone starts sending signals with
2311                  a lower number than SIGSTOP (e.g. SIGUSR1) to the new
2312                  thread before it gets a chance to run.  This is an
2313                  unlikely case, and harder to handle for fork / vfork
2314                  than for clone, so we do not try - but we handle it
2315                  for clone events here.  We'll send the other signal
2316                  on to the thread below.  */
2317
2318 new_lp->signalled = 1;
2319 }
2320 else
2321 {
2322 struct thread_info *tp;
2323
2324 /* When we stop for an event in some other thread, and
2325 pull the thread list just as this thread has cloned,
2326 we'll have seen the new thread in the thread_db list
2327 before handling the CLONE event (glibc's
2328 pthread_create adds the new thread to the thread list
2329 before clone'ing, and has the kernel fill in the
2330 thread's tid on the clone call with
2331 CLONE_PARENT_SETTID). If that happened, and the core
2332 had requested the new thread to stop, we'll have
2333 killed it with SIGSTOP. But since SIGSTOP is not an
2334 RT signal, it can only be queued once. We need to be
2335 careful to not resume the LWP if we wanted it to
2336 stop. In that case, we'll leave the SIGSTOP pending.
2337 It will later be reported as GDB_SIGNAL_0. */
2338 tp = find_thread_ptid (new_lp->ptid);
2339 if (tp != NULL && tp->stop_requested)
2340 new_lp->last_resume_kind = resume_stop;
2341 else
2342 status = 0;
2343 }
2344
2345 if (non_stop)
2346 {
2347 /* Add the new thread to GDB's lists as soon as possible
2348 so that:
2349
2350 1) the frontend doesn't have to wait for a stop to
2351 display them, and,
2352
2353 2) we tag it with the correct running state. */
2354
2355 /* If the thread_db layer is active, let it know about
2356 this new thread, and add it to GDB's list. */
2357 if (!thread_db_attach_lwp (new_lp->ptid))
2358 {
2359 /* We're not using thread_db. Add it to GDB's
2360 list. */
2361 target_post_attach (GET_LWP (new_lp->ptid));
2362 add_thread (new_lp->ptid);
2363 }
2364
2365 if (!stopping)
2366 {
2367 set_running (new_lp->ptid, 1);
2368 set_executing (new_lp->ptid, 1);
2369 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2370 resume_stop. */
2371 new_lp->last_resume_kind = resume_continue;
2372 }
2373 }
2374
2375 if (status != 0)
2376 {
2377 /* We created NEW_LP so it cannot yet contain STATUS. */
2378 gdb_assert (new_lp->status == 0);
2379
2380 /* Save the wait status to report later. */
2381 if (debug_linux_nat)
2382 fprintf_unfiltered (gdb_stdlog,
2383 "LHEW: waitpid of new LWP %ld, "
2384 "saving status %s\n",
2385 (long) GET_LWP (new_lp->ptid),
2386 status_to_str (status));
2387 new_lp->status = status;
2388 }
2389
2390 /* Note the need to use the low target ops to resume, to
2391 handle resuming with PT_SYSCALL if we have syscall
2392 catchpoints. */
2393 if (!stopping)
2394 {
2395 new_lp->resumed = 1;
2396
2397 if (status == 0)
2398 {
2399 gdb_assert (new_lp->last_resume_kind == resume_continue);
2400 if (debug_linux_nat)
2401 fprintf_unfiltered (gdb_stdlog,
2402 "LHEW: resuming new LWP %ld\n",
2403 GET_LWP (new_lp->ptid));
2404 if (linux_nat_prepare_to_resume != NULL)
2405 linux_nat_prepare_to_resume (new_lp);
2406 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2407 0, GDB_SIGNAL_0);
2408 new_lp->stopped = 0;
2409 }
2410 }
2411
2412 if (debug_linux_nat)
2413 fprintf_unfiltered (gdb_stdlog,
2414 "LHEW: resuming parent LWP %d\n", pid);
2415 if (linux_nat_prepare_to_resume != NULL)
2416 linux_nat_prepare_to_resume (lp);
2417 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2418 0, GDB_SIGNAL_0);
2419
2420 return 1;
2421 }
2422
2423 return 0;
2424 }
2425
2426 if (event == PTRACE_EVENT_EXEC)
2427 {
2428 if (debug_linux_nat)
2429 fprintf_unfiltered (gdb_stdlog,
2430 "LHEW: Got exec event from LWP %ld\n",
2431 GET_LWP (lp->ptid));
2432
2433 ourstatus->kind = TARGET_WAITKIND_EXECD;
2434 ourstatus->value.execd_pathname
2435 = xstrdup (linux_child_pid_to_exec_file (pid));
2436
2437 return 0;
2438 }
2439
2440 if (event == PTRACE_EVENT_VFORK_DONE)
2441 {
2442 if (current_inferior ()->waiting_for_vfork_done)
2443 {
2444 if (debug_linux_nat)
2445 fprintf_unfiltered (gdb_stdlog,
2446 "LHEW: Got expected PTRACE_EVENT_"
2447 "VFORK_DONE from LWP %ld: stopping\n",
2448 GET_LWP (lp->ptid));
2449
2450 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2451 return 0;
2452 }
2453
2454 if (debug_linux_nat)
2455 fprintf_unfiltered (gdb_stdlog,
2456 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2457 "from LWP %ld: resuming\n",
2458 GET_LWP (lp->ptid));
2459 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2460 return 1;
2461 }
2462
2463 internal_error (__FILE__, __LINE__,
2464 _("unknown ptrace event %d"), event);
2465 }
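
/* For reference: an extended ptrace event arrives as a SIGTRAP stop
   with the event number in the high bits of the wait status, e.g. a
   clone event looks like

     status == (PTRACE_EVENT_CLONE << 16) | (SIGTRAP << 8) | 0x7f

   which is why the code above extracts the event with status >> 16.
   These events are delivered only for LWPs that had the matching
   PTRACE_O_TRACEFORK / VFORK / CLONE / EXEC options set, which is
   done elsewhere in this file when LWPs are attached.  */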
2466
2467 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2468 exited. */
2469
2470 static int
2471 wait_lwp (struct lwp_info *lp)
2472 {
2473 pid_t pid;
2474 int status = 0;
2475 int thread_dead = 0;
2476 sigset_t prev_mask;
2477
2478 gdb_assert (!lp->stopped);
2479 gdb_assert (lp->status == 0);
2480
2481 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2482 block_child_signals (&prev_mask);
2483
2484 for (;;)
2485 {
2486       /* If my_waitpid returns 0, the __WCLONE vs. non-__WCLONE kind
2487          was right, but no event is pending yet; just call sigsuspend.  */
2488
2489 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
2490 if (pid == -1 && errno == ECHILD)
2491 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
2492 if (pid == -1 && errno == ECHILD)
2493 {
2494 /* The thread has previously exited. We need to delete it
2495 now because, for some vendor 2.4 kernels with NPTL
2496 support backported, there won't be an exit event unless
2497 it is the main thread. 2.6 kernels will report an exit
2498 event for each thread that exits, as expected. */
2499 thread_dead = 1;
2500 if (debug_linux_nat)
2501 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2502 target_pid_to_str (lp->ptid));
2503 }
2504 if (pid != 0)
2505 break;
2506
2507 /* Bugs 10970, 12702.
2508 Thread group leader may have exited in which case we'll lock up in
2509 waitpid if there are other threads, even if they are all zombies too.
2510 Basically, we're not supposed to use waitpid this way.
2511 __WCLONE is not applicable for the leader so we can't use that.
2512 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2513 process; it gets ESRCH both for the zombie and for running processes.
2514
2515 As a workaround, check if we're waiting for the thread group leader and
2516 if it's a zombie, and avoid calling waitpid if it is.
2517
2518 This is racy, what if the tgl becomes a zombie right after we check?
2519          Therefore always use WNOHANG with sigsuspend - it is equivalent to
2520          a blocking waitpid, but linux_proc_pid_is_zombie stays safe this way.  */
2521
2522 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
2523 && linux_proc_pid_is_zombie (GET_LWP (lp->ptid)))
2524 {
2525 thread_dead = 1;
2526 if (debug_linux_nat)
2527 fprintf_unfiltered (gdb_stdlog,
2528 "WL: Thread group leader %s vanished.\n",
2529 target_pid_to_str (lp->ptid));
2530 break;
2531 }
2532
2533       /* Wait for the next SIGCHLD and try again.  This may let SIGCHLD
2534          handlers get invoked even though our caller intentionally blocked
2535          them with block_child_signals.  Only the loop in linux_nat_wait_1
2536          is sensitive to this, and there, if we get called, my_waitpid is
2537          called again before it gets to sigsuspend, so we can safely let
2538          the handlers run here.  */
2539
2540 sigsuspend (&suspend_mask);
2541 }
2542
2543 restore_child_signals_mask (&prev_mask);
2544
2545 if (!thread_dead)
2546 {
2547 gdb_assert (pid == GET_LWP (lp->ptid));
2548
2549 if (debug_linux_nat)
2550 {
2551 fprintf_unfiltered (gdb_stdlog,
2552 "WL: waitpid %s received %s\n",
2553 target_pid_to_str (lp->ptid),
2554 status_to_str (status));
2555 }
2556
2557 /* Check if the thread has exited. */
2558 if (WIFEXITED (status) || WIFSIGNALED (status))
2559 {
2560 thread_dead = 1;
2561 if (debug_linux_nat)
2562 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2563 target_pid_to_str (lp->ptid));
2564 }
2565 }
2566
2567 if (thread_dead)
2568 {
2569 exit_lwp (lp);
2570 return 0;
2571 }
2572
2573 gdb_assert (WIFSTOPPED (status));
2574
2575 /* Handle GNU/Linux's syscall SIGTRAPs. */
2576 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2577 {
2578 /* No longer need the sysgood bit. The ptrace event ends up
2579 recorded in lp->waitstatus if we care for it. We can carry
2580 on handling the event like a regular SIGTRAP from here
2581 on. */
2582 status = W_STOPCODE (SIGTRAP);
2583 if (linux_handle_syscall_trap (lp, 1))
2584 return wait_lwp (lp);
2585 }
2586
2587 /* Handle GNU/Linux's extended waitstatus for trace events. */
2588 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2589 {
2590 if (debug_linux_nat)
2591 fprintf_unfiltered (gdb_stdlog,
2592 "WL: Handling extended status 0x%06x\n",
2593 status);
2594 if (linux_handle_extended_wait (lp, status, 1))
2595 return wait_lwp (lp);
2596 }
2597
2598 return status;
2599 }
2600
2601 /* Send a SIGSTOP to LP. */
2602
2603 static int
2604 stop_callback (struct lwp_info *lp, void *data)
2605 {
2606 if (!lp->stopped && !lp->signalled)
2607 {
2608 int ret;
2609
2610 if (debug_linux_nat)
2611 {
2612 fprintf_unfiltered (gdb_stdlog,
2613 "SC: kill %s **<SIGSTOP>**\n",
2614 target_pid_to_str (lp->ptid));
2615 }
2616 errno = 0;
2617 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2618 if (debug_linux_nat)
2619 {
2620 fprintf_unfiltered (gdb_stdlog,
2621 "SC: lwp kill %d %s\n",
2622 ret,
2623 errno ? safe_strerror (errno) : "ERRNO-OK");
2624 }
2625
2626 lp->signalled = 1;
2627 gdb_assert (lp->status == 0);
2628 }
2629
2630 return 0;
2631 }
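
/* stop_callback only sends the SIGSTOP; collecting the resulting stop
   is a separate step.  The canonical usage is the two-phase pattern
   seen in linux_nat_detach above:

     iterate_over_lwps (ptid, stop_callback, NULL);
     iterate_over_lwps (ptid, stop_wait_callback, NULL);

   first signalling every LWP, then waiting until each one has
   actually reported its stop.  */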
2632
2633 /* Request a stop on LWP. */
2634
2635 void
2636 linux_stop_lwp (struct lwp_info *lwp)
2637 {
2638 stop_callback (lwp, NULL);
2639 }
2640
2641 /* Return non-zero if LWP PID has a pending SIGINT. */
2642
2643 static int
2644 linux_nat_has_pending_sigint (int pid)
2645 {
2646 sigset_t pending, blocked, ignored;
2647
2648 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2649
2650 if (sigismember (&pending, SIGINT)
2651 && !sigismember (&ignored, SIGINT))
2652 return 1;
2653
2654 return 0;
2655 }
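
/* linux_proc_pending_signals derives these sets from the signal mask
   lines of /proc/PID/status.  For example, a pending SIGINT (signal
   2) appears as bit 1 of the pending mask:

     SigPnd:	0000000000000002

   with the blocked and ignored sets coming from the SigBlk and SigIgn
   lines in the same format.  */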
2656
2657 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2658
2659 static int
2660 set_ignore_sigint (struct lwp_info *lp, void *data)
2661 {
2662 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2663 flag to consume the next one. */
2664 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2665 && WSTOPSIG (lp->status) == SIGINT)
2666 lp->status = 0;
2667 else
2668 lp->ignore_sigint = 1;
2669
2670 return 0;
2671 }
2672
2673 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2674 This function is called after we know the LWP has stopped; if the LWP
2675 stopped before the expected SIGINT was delivered, then it will never have
2676 arrived. Also, if the signal was delivered to a shared queue and consumed
2677 by a different thread, it will never be delivered to this LWP. */
2678
2679 static void
2680 maybe_clear_ignore_sigint (struct lwp_info *lp)
2681 {
2682 if (!lp->ignore_sigint)
2683 return;
2684
2685 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2686 {
2687 if (debug_linux_nat)
2688 fprintf_unfiltered (gdb_stdlog,
2689 "MCIS: Clearing bogus flag for %s\n",
2690 target_pid_to_str (lp->ptid));
2691 lp->ignore_sigint = 0;
2692 }
2693 }
2694
2695 /* Fetch the possible triggered data watchpoint info and store it in
2696 LP.
2697
2698 On some archs, like x86, that use debug registers to set
2699 watchpoints, it's possible that the way to know which watched
2700    address trapped is to check the register that is used to select
2701 which address to watch. Problem is, between setting the watchpoint
2702 and reading back which data address trapped, the user may change
2703 the set of watchpoints, and, as a consequence, GDB changes the
2704 debug registers in the inferior. To avoid reading back a stale
2705 stopped-data-address when that happens, we cache in LP the fact
2706 that a watchpoint trapped, and the corresponding data address, as
2707 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2708 registers meanwhile, we have the cached data we can rely on. */
2709
2710 static void
2711 save_sigtrap (struct lwp_info *lp)
2712 {
2713 struct cleanup *old_chain;
2714
2715 if (linux_ops->to_stopped_by_watchpoint == NULL)
2716 {
2717 lp->stopped_by_watchpoint = 0;
2718 return;
2719 }
2720
2721 old_chain = save_inferior_ptid ();
2722 inferior_ptid = lp->ptid;
2723
2724 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2725
2726 if (lp->stopped_by_watchpoint)
2727 {
2728 if (linux_ops->to_stopped_data_address != NULL)
2729 lp->stopped_data_address_p =
2730 linux_ops->to_stopped_data_address (&current_target,
2731 &lp->stopped_data_address);
2732 else
2733 lp->stopped_data_address_p = 0;
2734 }
2735
2736 do_cleanups (old_chain);
2737 }
2738
2739 /* See save_sigtrap. */
2740
2741 static int
2742 linux_nat_stopped_by_watchpoint (void)
2743 {
2744 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2745
2746 gdb_assert (lp != NULL);
2747
2748 return lp->stopped_by_watchpoint;
2749 }
2750
2751 static int
2752 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2753 {
2754 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2755
2756 gdb_assert (lp != NULL);
2757
2758 *addr_p = lp->stopped_data_address;
2759
2760 return lp->stopped_data_address_p;
2761 }
2762
2763 /* Commonly, any breakpoint / watchpoint generates only SIGTRAP.  */
2764
2765 static int
2766 sigtrap_is_event (int status)
2767 {
2768 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2769 }
2770
2771 /* SIGTRAP-like events recognizer. */
2772
2773 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2774
2775 /* Check for SIGTRAP-like events in LP. */
2776
2777 static int
2778 linux_nat_lp_status_is_event (struct lwp_info *lp)
2779 {
2780 /* We check for lp->waitstatus in addition to lp->status, because we can
2781 have pending process exits recorded in lp->status
2782 and W_EXITCODE(0,0) == 0. We should probably have an additional
2783 lp->status_p flag. */
2784
2785 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2786 && linux_nat_status_is_event (lp->status));
2787 }
2788
2789 /* Set an alternative recognizer for SIGTRAP-like events.  If
2790    breakpoint_inserted_here_p finds a breakpoint at the adjusted PC,
2791    then gdbarch_decr_pc_after_break will be applied.  */
2792
2793 void
2794 linux_nat_set_status_is_event (struct target_ops *t,
2795 int (*status_is_event) (int status))
2796 {
2797 linux_nat_status_is_event = status_is_event;
2798 }
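
/* As an illustration only (this target and recognizer are
   hypothetical, not part of GDB): a low target whose kernel reports
   breakpoint traps with an additional signal could install its own
   recognizer like this:

     static int
     example_status_is_event (int status)
     {
       return (WIFSTOPPED (status)
	       && (WSTOPSIG (status) == SIGTRAP
		   || WSTOPSIG (status) == SIGILL));
     }

     linux_nat_set_status_is_event (t, example_status_is_event);

   after which every linux_nat_status_is_event check in this file
   would accept both signals as "SIGTRAP-like".  */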
2799
2800 /* Wait until LP is stopped. */
2801
2802 static int
2803 stop_wait_callback (struct lwp_info *lp, void *data)
2804 {
2805 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2806
2807 /* If this is a vfork parent, bail out, it is not going to report
2808    any SIGSTOP until the vfork is done.  */
2809 if (inf->vfork_child != NULL)
2810 return 0;
2811
2812 if (!lp->stopped)
2813 {
2814 int status;
2815
2816 status = wait_lwp (lp);
2817 if (status == 0)
2818 return 0;
2819
2820 if (lp->ignore_sigint && WIFSTOPPED (status)
2821 && WSTOPSIG (status) == SIGINT)
2822 {
2823 lp->ignore_sigint = 0;
2824
2825 errno = 0;
2826 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2827 if (debug_linux_nat)
2828 fprintf_unfiltered (gdb_stdlog,
2829 "PTRACE_CONT %s, 0, 0 (%s) "
2830 "(discarding SIGINT)\n",
2831 target_pid_to_str (lp->ptid),
2832 errno ? safe_strerror (errno) : "OK");
2833
2834 return stop_wait_callback (lp, NULL);
2835 }
2836
2837 maybe_clear_ignore_sigint (lp);
2838
2839 if (WSTOPSIG (status) != SIGSTOP)
2840 {
2841 /* The thread was stopped with a signal other than SIGSTOP. */
2842
2843 save_sigtrap (lp);
2844
2845 if (debug_linux_nat)
2846 fprintf_unfiltered (gdb_stdlog,
2847 "SWC: Pending event %s in %s\n",
2848 status_to_str ((int) status),
2849 target_pid_to_str (lp->ptid));
2850
2851 /* Save the sigtrap event. */
2852 lp->status = status;
2853 gdb_assert (!lp->stopped);
2854 gdb_assert (lp->signalled);
2855 lp->stopped = 1;
2856 }
2857 else
2858 {
2859 /* We caught the SIGSTOP that we intended to catch, so
2860 there's no SIGSTOP pending. */
2861
2862 if (debug_linux_nat)
2863 fprintf_unfiltered (gdb_stdlog,
2864 "SWC: Delayed SIGSTOP caught for %s.\n",
2865 target_pid_to_str (lp->ptid));
2866
2867 lp->stopped = 1;
2868
2869 /* Reset SIGNALLED only after the stop_wait_callback call
2870 above as it does gdb_assert on SIGNALLED. */
2871 lp->signalled = 0;
2872 }
2873 }
2874
2875 return 0;
2876 }
2877
2878 /* Return non-zero if LP has a wait status pending. */
2879
2880 static int
2881 status_callback (struct lwp_info *lp, void *data)
2882 {
2883 /* Only report a pending wait status if we pretend that this has
2884 indeed been resumed. */
2885 if (!lp->resumed)
2886 return 0;
2887
2888 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2889 {
2890 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2891 or a pending process exit. Note that `W_EXITCODE(0,0) ==
2892 0', so a clean process exit can not be stored pending in
2893 lp->status, it is indistinguishable from
2894 no-pending-status. */
2895 return 1;
2896 }
2897
2898 if (lp->status != 0)
2899 return 1;
2900
2901 return 0;
2902 }
2903
2904 /* Return non-zero if LP isn't stopped. */
2905
2906 static int
2907 running_callback (struct lwp_info *lp, void *data)
2908 {
2909 return (!lp->stopped
2910 || ((lp->status != 0
2911 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2912 && lp->resumed));
2913 }
2914
2915 /* Count the LWPs that have had events.  */
2916
2917 static int
2918 count_events_callback (struct lwp_info *lp, void *data)
2919 {
2920 int *count = data;
2921
2922 gdb_assert (count != NULL);
2923
2924 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2925 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2926 (*count)++;
2927
2928 return 0;
2929 }
2930
2931 /* Select the LWP (if any) that is currently being single-stepped. */
2932
2933 static int
2934 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2935 {
2936 if (lp->last_resume_kind == resume_step
2937 && lp->status != 0)
2938 return 1;
2939 else
2940 return 0;
2941 }
2942
2943 /* Select the Nth LWP that has had a SIGTRAP event. */
2944
2945 static int
2946 select_event_lwp_callback (struct lwp_info *lp, void *data)
2947 {
2948 int *selector = data;
2949
2950 gdb_assert (selector != NULL);
2951
2952 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2953 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2954 if ((*selector)-- == 0)
2955 return 1;
2956
2957 return 0;
2958 }
2959
2960 static int
2961 cancel_breakpoint (struct lwp_info *lp)
2962 {
2963 /* Arrange for a breakpoint to be hit again later. We don't keep
2964 the SIGTRAP status and don't forward the SIGTRAP signal to the
2965 LWP. We will handle the current event, eventually we will resume
2966 this LWP, and this breakpoint will trap again.
2967
2968 If we do not do this, then we run the risk that the user will
2969 delete or disable the breakpoint, but the LWP will have already
2970 tripped on it. */
2971
2972 struct regcache *regcache = get_thread_regcache (lp->ptid);
2973 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2974 CORE_ADDR pc;
2975
2976 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
2977 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2978 {
2979 if (debug_linux_nat)
2980 fprintf_unfiltered (gdb_stdlog,
2981 "CB: Push back breakpoint for %s\n",
2982 target_pid_to_str (lp->ptid));
2983
2984 /* Back up the PC if necessary. */
2985 if (gdbarch_decr_pc_after_break (gdbarch))
2986 regcache_write_pc (regcache, pc);
2987
2988 return 1;
2989 }
2990 return 0;
2991 }
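
/* Concretely: on x86 a software breakpoint is the one-byte int3
   instruction (0xcc), and the trap reports a PC just past it, so
   gdbarch_decr_pc_after_break is 1 there; the code above rewinds the
   PC by that byte so the breakpoint is recognized at its installed
   address and will trap again on resume.  On architectures where the
   trap leaves the PC at the breakpoint address itself,
   gdbarch_decr_pc_after_break is 0 and no adjustment is made.  */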
2992
2993 static int
2994 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2995 {
2996 struct lwp_info *event_lp = data;
2997
2998 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2999 if (lp == event_lp)
3000 return 0;
3001
3002 /* If a LWP other than the LWP that we're reporting an event for has
3003 hit a GDB breakpoint (as opposed to some random trap signal),
3004 then just arrange for it to hit it again later. We don't keep
3005 the SIGTRAP status and don't forward the SIGTRAP signal to the
3006 LWP. We will handle the current event, eventually we will resume
3007 all LWPs, and this one will get its breakpoint trap again.
3008
3009 If we do not do this, then we run the risk that the user will
3010 delete or disable the breakpoint, but the LWP will have already
3011 tripped on it. */
3012
3013 if (linux_nat_lp_status_is_event (lp)
3014 && cancel_breakpoint (lp))
3015 /* Throw away the SIGTRAP. */
3016 lp->status = 0;
3017
3018 return 0;
3019 }
3020
3021 /* Select one LWP out of those that have events pending. */
3022
3023 static void
3024 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
3025 {
3026 int num_events = 0;
3027 int random_selector;
3028 struct lwp_info *event_lp;
3029
3030 /* Record the wait status for the original LWP. */
3031 (*orig_lp)->status = *status;
3032
3033 /* Give preference to any LWP that is being single-stepped. */
3034 event_lp = iterate_over_lwps (filter,
3035 select_singlestep_lwp_callback, NULL);
3036 if (event_lp != NULL)
3037 {
3038 if (debug_linux_nat)
3039 fprintf_unfiltered (gdb_stdlog,
3040 "SEL: Select single-step %s\n",
3041 target_pid_to_str (event_lp->ptid));
3042 }
3043 else
3044 {
3045 /* No single-stepping LWP. Select one at random, out of those
3046 which have had SIGTRAP events. */
3047
3048 /* First see how many SIGTRAP events we have. */
3049 iterate_over_lwps (filter, count_events_callback, &num_events);
3050
3051 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3052 random_selector = (int)
3053 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3054
3055 if (debug_linux_nat && num_events > 1)
3056 fprintf_unfiltered (gdb_stdlog,
3057 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3058 num_events, random_selector);
3059
3060 event_lp = iterate_over_lwps (filter,
3061 select_event_lwp_callback,
3062 &random_selector);
3063 }
3064
3065 if (event_lp != NULL)
3066 {
3067 /* Switch the event LWP. */
3068 *orig_lp = event_lp;
3069 *status = event_lp->status;
3070 }
3071
3072 /* Flush the wait status for the event LWP. */
3073 (*orig_lp)->status = 0;
3074 }
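
/* The random pick above maps rand () uniformly onto the range
   0 .. num_events - 1.  For example, with three pending SIGTRAPs,

     random_selector = (int) ((3 * (double) rand ()) / (RAND_MAX + 1.0));

   yields 0, 1 or 2 with (nearly) equal probability, and
   select_event_lwp_callback then counts down to the chosen LWP.
   Randomizing here keeps one busy thread from starving the others
   when several trap at the same time.  */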
3075
3076 /* Return non-zero if LP has been resumed. */
3077
3078 static int
3079 resumed_callback (struct lwp_info *lp, void *data)
3080 {
3081 return lp->resumed;
3082 }
3083
3084 /* Stop an active thread, verify it still exists, then resume it. If
3085 the thread ends up with a pending status, then it is not resumed,
3086    and *DATA (really a pointer to int) is set.  */
3087
3088 static int
3089 stop_and_resume_callback (struct lwp_info *lp, void *data)
3090 {
3091 int *new_pending_p = data;
3092
3093 if (!lp->stopped)
3094 {
3095 ptid_t ptid = lp->ptid;
3096
3097 stop_callback (lp, NULL);
3098 stop_wait_callback (lp, NULL);
3099
3100 /* Resume if the lwp still exists, and the core wanted it
3101 running. */
3102 lp = find_lwp_pid (ptid);
3103 if (lp != NULL)
3104 {
3105 if (lp->last_resume_kind == resume_stop
3106 && lp->status == 0)
3107 {
3108 /* The core wanted the LWP to stop. Even if it stopped
3109 cleanly (with SIGSTOP), leave the event pending. */
3110 if (debug_linux_nat)
3111 fprintf_unfiltered (gdb_stdlog,
3112 "SARC: core wanted LWP %ld stopped "
3113 "(leaving SIGSTOP pending)\n",
3114 GET_LWP (lp->ptid));
3115 lp->status = W_STOPCODE (SIGSTOP);
3116 }
3117
3118 if (lp->status == 0)
3119 {
3120 if (debug_linux_nat)
3121 fprintf_unfiltered (gdb_stdlog,
3122 "SARC: re-resuming LWP %ld\n",
3123 GET_LWP (lp->ptid));
3124 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
3125 }
3126 else
3127 {
3128 if (debug_linux_nat)
3129 fprintf_unfiltered (gdb_stdlog,
3130 "SARC: not re-resuming LWP %ld "
3131 "(has pending)\n",
3132 GET_LWP (lp->ptid));
3133 if (new_pending_p)
3134 *new_pending_p = 1;
3135 }
3136 }
3137 }
3138 return 0;
3139 }
3140
3141 /* Check if we should go on and pass this event to common code.
3142    Return the affected lwp if so, or NULL otherwise.  If we stop
3143 all lwps temporarily, we may end up with new pending events in some
3144 other lwp. In that case set *NEW_PENDING_P to true. */
3145
3146 static struct lwp_info *
3147 linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
3148 {
3149 struct lwp_info *lp;
3150
3151 *new_pending_p = 0;
3152
3153 lp = find_lwp_pid (pid_to_ptid (lwpid));
3154
3155 /* Check for stop events reported by a process we didn't already
3156 know about - anything not already in our LWP list.
3157
3158 If we're expecting to receive stopped processes after
3159 fork, vfork, and clone events, then we'll just add the
3160 new one to our list and go back to waiting for the event
3161 to be reported - the stopped process might be returned
3162 from waitpid before or after the event is.
3163
3164 But note the case of a non-leader thread exec'ing after the
3165      leader has exited and been removed from our lists.  The non-leader
3166 thread changes its tid to the tgid. */
3167
3168 if (WIFSTOPPED (status) && lp == NULL
3169 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3170 {
3171 /* A multi-thread exec after we had seen the leader exiting. */
3172 if (debug_linux_nat)
3173 fprintf_unfiltered (gdb_stdlog,
3174 "LLW: Re-adding thread group leader LWP %d.\n",
3175 lwpid);
3176
3177 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3178 lp->stopped = 1;
3179 lp->resumed = 1;
3180 add_thread (lp->ptid);
3181 }
3182
3183 if (WIFSTOPPED (status) && !lp)
3184 {
3185 add_to_pid_list (&stopped_pids, lwpid, status);
3186 return NULL;
3187 }
3188
3189 /* Make sure we don't report an event for the exit of an LWP not in
3190 our list, i.e. not part of the current process. This can happen
3191 if we detach from a program we originally forked and then it
3192 exits. */
3193 if (!WIFSTOPPED (status) && !lp)
3194 return NULL;
3195
3196 /* Handle GNU/Linux's syscall SIGTRAPs. */
3197 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3198 {
3199 /* No longer need the sysgood bit. The ptrace event ends up
3200 recorded in lp->waitstatus if we care for it. We can carry
3201 on handling the event like a regular SIGTRAP from here
3202 on. */
3203 status = W_STOPCODE (SIGTRAP);
3204 if (linux_handle_syscall_trap (lp, 0))
3205 return NULL;
3206 }
3207
3208 /* Handle GNU/Linux's extended waitstatus for trace events. */
3209 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
3210 {
3211 if (debug_linux_nat)
3212 fprintf_unfiltered (gdb_stdlog,
3213 "LLW: Handling extended status 0x%06x\n",
3214 status);
3215 if (linux_handle_extended_wait (lp, status, 0))
3216 return NULL;
3217 }
3218
3219 if (linux_nat_status_is_event (status))
3220 save_sigtrap (lp);
3221
3222 /* Check if the thread has exited. */
3223 if ((WIFEXITED (status) || WIFSIGNALED (status))
3224 && num_lwps (GET_PID (lp->ptid)) > 1)
3225 {
3226 /* If this is the main thread, we must stop all threads and verify
3227 if they are still alive. This is because in the nptl thread model
3228 on Linux 2.4, there is no signal issued for exiting LWPs
3229 other than the main thread. We only get the main thread exit
3230 signal once all child threads have already exited. If we
3231 stop all the threads and use the stop_wait_callback to check
3232 if they have exited we can determine whether this signal
3233 should be ignored or whether it means the end of the debugged
3234 application, regardless of which threading model is being
3235 used. */
3236 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3237 {
3238 lp->stopped = 1;
3239 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3240 stop_and_resume_callback, new_pending_p);
3241 }
3242
3243 if (debug_linux_nat)
3244 fprintf_unfiltered (gdb_stdlog,
3245 "LLW: %s exited.\n",
3246 target_pid_to_str (lp->ptid));
3247
3248 if (num_lwps (GET_PID (lp->ptid)) > 1)
3249 {
3250 /* If there is at least one more LWP, then the exit signal
3251 was not the end of the debugged application and should be
3252 ignored. */
3253 exit_lwp (lp);
3254 return NULL;
3255 }
3256 }
3257
3258 /* Check if the current LWP has previously exited. In the nptl
3259 thread model, LWPs other than the main thread do not issue
3260 signals when they exit so we must check whenever the thread has
3261 stopped. A similar check is made in stop_wait_callback(). */
3262 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
3263 {
3264 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3265
3266 if (debug_linux_nat)
3267 fprintf_unfiltered (gdb_stdlog,
3268 "LLW: %s exited.\n",
3269 target_pid_to_str (lp->ptid));
3270
3271 exit_lwp (lp);
3272
3273 /* Make sure there is at least one thread running. */
3274 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
3275
3276 /* Discard the event. */
3277 return NULL;
3278 }
3279
3280 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3281 an attempt to stop an LWP. */
3282 if (lp->signalled
3283 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3284 {
3285 if (debug_linux_nat)
3286 fprintf_unfiltered (gdb_stdlog,
3287 "LLW: Delayed SIGSTOP caught for %s.\n",
3288 target_pid_to_str (lp->ptid));
3289
3290 lp->signalled = 0;
3291
3292 if (lp->last_resume_kind != resume_stop)
3293 {
3294 /* This is a delayed SIGSTOP. */
3295
3296 registers_changed ();
3297
3298 if (linux_nat_prepare_to_resume != NULL)
3299 linux_nat_prepare_to_resume (lp);
3300 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3301 lp->step, GDB_SIGNAL_0);
3302 if (debug_linux_nat)
3303 fprintf_unfiltered (gdb_stdlog,
3304 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3305 lp->step ?
3306 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3307 target_pid_to_str (lp->ptid));
3308
3309 lp->stopped = 0;
3310 gdb_assert (lp->resumed);
3311
3312 /* Discard the event. */
3313 return NULL;
3314 }
3315 }
3316
3317 /* Make sure we don't report a SIGINT that we have already displayed
3318 for another thread. */
3319 if (lp->ignore_sigint
3320 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3321 {
3322 if (debug_linux_nat)
3323 fprintf_unfiltered (gdb_stdlog,
3324 "LLW: Delayed SIGINT caught for %s.\n",
3325 target_pid_to_str (lp->ptid));
3326
3327 /* This is a delayed SIGINT. */
3328 lp->ignore_sigint = 0;
3329
3330 registers_changed ();
3331 if (linux_nat_prepare_to_resume != NULL)
3332 linux_nat_prepare_to_resume (lp);
3333 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3334 lp->step, GDB_SIGNAL_0);
3335 if (debug_linux_nat)
3336 fprintf_unfiltered (gdb_stdlog,
3337 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3338 lp->step ?
3339 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3340 target_pid_to_str (lp->ptid));
3341
3342 lp->stopped = 0;
3343 gdb_assert (lp->resumed);
3344
3345 /* Discard the event. */
3346 return NULL;
3347 }
3348
3349 /* An interesting event. */
3350 gdb_assert (lp);
3351 lp->status = status;
3352 return lp;
3353 }
3354
3355 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3356 their exits until all other threads in the group have exited. */
3357
3358 static void
3359 check_zombie_leaders (void)
3360 {
3361 struct inferior *inf;
3362
3363 ALL_INFERIORS (inf)
3364 {
3365 struct lwp_info *leader_lp;
3366
3367 if (inf->pid == 0)
3368 continue;
3369
3370 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3371 if (leader_lp != NULL
3372 /* Check if there are other threads in the group, as we may
3373 have raced with the inferior simply exiting. */
3374 && num_lwps (inf->pid) > 1
3375 && linux_proc_pid_is_zombie (inf->pid))
3376 {
3377 if (debug_linux_nat)
3378 fprintf_unfiltered (gdb_stdlog,
3379 "CZL: Thread group leader %d zombie "
3380 "(it exited, or another thread execd).\n",
3381 inf->pid);
3382
3383 /* A leader zombie can mean one of two things:
3384
3385              - It exited, and there's an exit status pending, or
3386              only the leader exited (not the whole
3387 program). In the latter case, we can't waitpid the
3388 leader's exit status until all other threads are gone.
3389
3390 - There are 3 or more threads in the group, and a thread
3391 other than the leader exec'd. On an exec, the Linux
3392 kernel destroys all other threads (except the execing
3393 one) in the thread group, and resets the execing thread's
3394 tid to the tgid. No exit notification is sent for the
3395 execing thread -- from the ptracer's perspective, it
3396 appears as though the execing thread just vanishes.
3397 Until we reap all other threads except the leader and the
3398 execing thread, the leader will be zombie, and the
3399 execing thread will be in `D (disc sleep)'. As soon as
3400 all other threads are reaped, the execing thread changes
3401              its tid to the tgid, and the previous (zombie) leader
3402              vanishes, giving way to the "new" leader.  We could try
3403 distinguishing the exit and exec cases, by waiting once
3404 more, and seeing if something comes out, but it doesn't
3405 sound useful. The previous leader _does_ go away, and
3406 we'll re-add the new one once we see the exec event
3407 (which is just the same as what would happen if the
3408 previous leader did exit voluntarily before some other
3409 thread execs). */
3410
3411 if (debug_linux_nat)
3412 fprintf_unfiltered (gdb_stdlog,
3413 "CZL: Thread group leader %d vanished.\n",
3414 inf->pid);
3415 exit_lwp (leader_lp);
3416 }
3417 }
3418 }
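
/* linux_proc_pid_is_zombie, used above, keys off the State: line of
   /proc/PID/status, which for a zombie leader reads like

     State:	Z (zombie)

   Going through /proc is what makes this check safe, since a waitpid
   on a zombie leader with living siblings would block (see the
   comment in wait_lwp above).  */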
3419
3420 static ptid_t
3421 linux_nat_wait_1 (struct target_ops *ops,
3422 ptid_t ptid, struct target_waitstatus *ourstatus,
3423 int target_options)
3424 {
3425 static sigset_t prev_mask;
3426 enum resume_kind last_resume_kind;
3427 struct lwp_info *lp;
3428 int status;
3429
3430 if (debug_linux_nat)
3431 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3432
3433 /* The first time we get here after starting a new inferior, we may
3434 not have added it to the LWP list yet - this is the earliest
3435 moment at which we know its PID. */
3436 if (ptid_is_pid (inferior_ptid))
3437 {
3438 /* Upgrade the main thread's ptid. */
3439 thread_change_ptid (inferior_ptid,
3440 BUILD_LWP (GET_PID (inferior_ptid),
3441 GET_PID (inferior_ptid)));
3442
3443 lp = add_initial_lwp (inferior_ptid);
3444 lp->resumed = 1;
3445 }
3446
3447 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
3448 block_child_signals (&prev_mask);
3449
3450 retry:
3451 lp = NULL;
3452 status = 0;
3453
3454 /* First check if there is a LWP with a wait status pending. */
3455 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3456 {
3457 /* Any LWP in the PTID group that's been resumed will do. */
3458 lp = iterate_over_lwps (ptid, status_callback, NULL);
3459 if (lp)
3460 {
3461 if (debug_linux_nat && lp->status)
3462 fprintf_unfiltered (gdb_stdlog,
3463 "LLW: Using pending wait status %s for %s.\n",
3464 status_to_str (lp->status),
3465 target_pid_to_str (lp->ptid));
3466 }
3467 }
3468 else if (is_lwp (ptid))
3469 {
3470 if (debug_linux_nat)
3471 fprintf_unfiltered (gdb_stdlog,
3472 "LLW: Waiting for specific LWP %s.\n",
3473 target_pid_to_str (ptid));
3474
3475 /* We have a specific LWP to check. */
3476 lp = find_lwp_pid (ptid);
3477 gdb_assert (lp);
3478
3479 if (debug_linux_nat && lp->status)
3480 fprintf_unfiltered (gdb_stdlog,
3481 "LLW: Using pending wait status %s for %s.\n",
3482 status_to_str (lp->status),
3483 target_pid_to_str (lp->ptid));
3484
3485 /* We check for lp->waitstatus in addition to lp->status,
3486 because we can have pending process exits recorded in
3487 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3488 an additional lp->status_p flag. */
3489 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3490 lp = NULL;
3491 }
3492
3493 if (!target_can_async_p ())
3494 {
3495 /* Causes SIGINT to be passed on to the attached process. */
3496 set_sigint_trap ();
3497 }
3498
3499 /* But if we don't find a pending event, we'll have to wait. */
3500
3501 while (lp == NULL)
3502 {
3503 pid_t lwpid;
3504
3505       /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3506 quirks:
3507
3508 - If the thread group leader exits while other threads in the
3509 thread group still exist, waitpid(TGID, ...) hangs. That
3510 waitpid won't return an exit status until the other threads
3511          in the group are reaped.
3512
3513 - When a non-leader thread execs, that thread just vanishes
3514 without reporting an exit (so we'd hang if we waited for it
3515 explicitly in that case). The exec event is reported to
3516 the TGID pid. */
3517
3518 errno = 0;
3519 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3520 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3521 lwpid = my_waitpid (-1, &status, WNOHANG);
3522
3523 if (debug_linux_nat)
3524 fprintf_unfiltered (gdb_stdlog,
3525 "LNW: waitpid(-1, ...) returned %d, %s\n",
3526 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3527
3528 if (lwpid > 0)
3529 {
3530 /* If this is true, then we paused LWPs momentarily, and may
3531 now have pending events to handle. */
3532 int new_pending;
3533
3534 if (debug_linux_nat)
3535 {
3536 fprintf_unfiltered (gdb_stdlog,
3537 "LLW: waitpid %ld received %s\n",
3538 (long) lwpid, status_to_str (status));
3539 }
3540
3541 lp = linux_nat_filter_event (lwpid, status, &new_pending);
3542
3543 /* STATUS is now no longer valid, use LP->STATUS instead. */
3544 status = 0;
3545
3546 if (lp && !ptid_match (lp->ptid, ptid))
3547 {
3548 gdb_assert (lp->resumed);
3549
3550 if (debug_linux_nat)
3551 fprintf (stderr,
3552 "LWP %ld got an event %06x, leaving pending.\n",
3553 ptid_get_lwp (lp->ptid), lp->status);
3554
3555 if (WIFSTOPPED (lp->status))
3556 {
3557 if (WSTOPSIG (lp->status) != SIGSTOP)
3558 {
3559 /* Cancel breakpoint hits. The breakpoint may
3560 be removed before we fetch events from this
3561 process to report to the core. It is best
3562 not to assume the moribund breakpoints
3563 heuristic always handles these cases --- it
3564 could be too many events go through to the
3565                      could be that too many events go through to the
3566 always cancels breakpoint hits in all
3567 threads. */
3568 if (non_stop
3569 && linux_nat_lp_status_is_event (lp)
3570 && cancel_breakpoint (lp))
3571 {
3572 /* Throw away the SIGTRAP. */
3573 lp->status = 0;
3574
3575 if (debug_linux_nat)
3576 fprintf (stderr,
3577 "LLW: LWP %ld hit a breakpoint while"
3578 " waiting for another process;"
3579 " cancelled it\n",
3580 ptid_get_lwp (lp->ptid));
3581 }
3582 lp->stopped = 1;
3583 }
3584 else
3585 {
3586 lp->stopped = 1;
3587 lp->signalled = 0;
3588 }
3589 }
3590 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3591 {
3592 if (debug_linux_nat)
3593 fprintf (stderr,
3594 "Process %ld exited while stopping LWPs\n",
3595 ptid_get_lwp (lp->ptid));
3596
3597 /* This was the last lwp in the process. Since
3598 events are serialized to GDB core, and we can't
3599 report this one right now, but GDB core and the
3600 other target layers will want to be notified
3601 about the exit code/signal, leave the status
3602 pending for the next time we're able to report
3603 it. */
3604
3605 /* Prevent trying to stop this thread again. We'll
3606 never try to resume it because it has a pending
3607 status. */
3608 lp->stopped = 1;
3609
3610              /* Dead LWPs aren't expected to report a pending
3611                 SIGSTOP.  */
3612 lp->signalled = 0;
3613
3614 /* Store the pending event in the waitstatus as
3615 well, because W_EXITCODE(0,0) == 0. */
3616 store_waitstatus (&lp->waitstatus, lp->status);
3617 }
3618
3619 /* Keep looking. */
3620 lp = NULL;
3621 }
3622
3623 if (new_pending)
3624 {
3625 /* Some LWP now has a pending event. Go all the way
3626 back to check it. */
3627 goto retry;
3628 }
3629
3630 if (lp)
3631 {
3632 /* We got an event to report to the core. */
3633 break;
3634 }
3635
3636 /* Retry until nothing comes out of waitpid. A single
3637 SIGCHLD can indicate more than one child stopped. */
3638 continue;
3639 }
3640
3641 /* Check for zombie thread group leaders. Those can't be reaped
3642 until all other threads in the thread group are. */
3643 check_zombie_leaders ();
3644
3645 /* If there are no resumed children left, bail. We'd be stuck
3646 forever in the sigsuspend call below otherwise. */
3647 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3648 {
3649 if (debug_linux_nat)
3650 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3651
3652 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3653
3654 if (!target_can_async_p ())
3655 clear_sigint_trap ();
3656
3657 restore_child_signals_mask (&prev_mask);
3658 return minus_one_ptid;
3659 }
3660
3661 /* No interesting event to report to the core. */
3662
3663 if (target_options & TARGET_WNOHANG)
3664 {
3665 if (debug_linux_nat)
3666 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3667
3668 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3669 restore_child_signals_mask (&prev_mask);
3670 return minus_one_ptid;
3671 }
3672
3673 /* We shouldn't end up here unless we want to try again. */
3674 gdb_assert (lp == NULL);
3675
3676 /* Block until we get an event reported with SIGCHLD. */
3677 sigsuspend (&suspend_mask);
3678 }
3679
3680 if (!target_can_async_p ())
3681 clear_sigint_trap ();
3682
3683 gdb_assert (lp);
3684
3685 status = lp->status;
3686 lp->status = 0;
3687
3688 /* Don't report signals that GDB isn't interested in, such as
3689 signals that are neither printed nor stopped upon. Stopping all
3690 threads can be a bit time-consuming so if we want decent
3691 performance with heavily multi-threaded programs, especially when
3692 they're using a high frequency timer, we'd better avoid it if we
3693 can. */
3694
3695 if (WIFSTOPPED (status))
3696 {
3697 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3698
3699 /* When using hardware single-step, we need to report every signal.
3700 Otherwise, signals in pass_mask may be short-circuited. */
3701 if (!lp->step
3702 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3703 {
3704 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3705 here? It is not clear we should. GDB may not expect
3706 other threads to run. On the other hand, not resuming
3707 newly attached threads may cause an unwanted delay in
3708 getting them running. */
3709 registers_changed ();
3710 if (linux_nat_prepare_to_resume != NULL)
3711 linux_nat_prepare_to_resume (lp);
3712 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3713 lp->step, signo);
3714 if (debug_linux_nat)
3715 fprintf_unfiltered (gdb_stdlog,
3716 "LLW: %s %s, %s (preempt 'handle')\n",
3717 lp->step ?
3718 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3719 target_pid_to_str (lp->ptid),
3720 (signo != GDB_SIGNAL_0
3721 ? strsignal (gdb_signal_to_host (signo))
3722 : "0"));
3723 lp->stopped = 0;
3724 goto retry;
3725 }
3726
3727 if (!non_stop)
3728 {
3729 /* Only do the below in all-stop, as we currently use SIGINT
3730 to implement target_stop (see linux_nat_stop) in
3731 non-stop. */
3732 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3733 {
3734 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3735 forwarded to the entire process group, that is, all LWPs
3736 will receive it - unless they're using CLONE_THREAD to
3737 share signals. Since we only want to report it once, we
3738 mark it as ignored for all LWPs except this one. */
3739 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3740 set_ignore_sigint, NULL);
3741 lp->ignore_sigint = 0;
3742 }
3743 else
3744 maybe_clear_ignore_sigint (lp);
3745 }
3746 }
3747
3748 /* This LWP is stopped now. */
3749 lp->stopped = 1;
3750
3751 if (debug_linux_nat)
3752 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3753 status_to_str (status), target_pid_to_str (lp->ptid));
3754
3755 if (!non_stop)
3756 {
3757 /* Now stop all other LWP's ... */
3758 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3759
3760 /* ... and wait until all of them have reported back that
3761 they're no longer running. */
3762 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3763
3764 /* If we're not waiting for a specific LWP, choose an event LWP
3765 from among those that have had events. Giving equal priority
3766 to all LWPs that have had events helps prevent
3767 starvation. */
3768 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3769 select_event_lwp (ptid, &lp, &status);
3770
3771 /* Now that we've selected our final event LWP, cancel any
3772 breakpoints in other LWPs that have hit a GDB breakpoint.
3773 See the comment in cancel_breakpoints_callback to find out
3774 why. */
3775 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3776
3777 /* We'll need this to determine whether to report a SIGSTOP as
3778 GDB_SIGNAL_0. Need to take a copy because
3779 resume_clear_callback clears it. */
3780 last_resume_kind = lp->last_resume_kind;
3781
3782 /* In all-stop, from the core's perspective, all LWPs are now
3783 stopped until a new resume action is sent over. */
3784 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3785 }
3786 else
3787 {
3788 /* See above. */
3789 last_resume_kind = lp->last_resume_kind;
3790 resume_clear_callback (lp, NULL);
3791 }
3792
3793 if (linux_nat_status_is_event (status))
3794 {
3795 if (debug_linux_nat)
3796 fprintf_unfiltered (gdb_stdlog,
3797 "LLW: trap ptid is %s.\n",
3798 target_pid_to_str (lp->ptid));
3799 }
3800
3801 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3802 {
3803 *ourstatus = lp->waitstatus;
3804 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3805 }
3806 else
3807 store_waitstatus (ourstatus, status);
3808
3809 if (debug_linux_nat)
3810 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3811
3812 restore_child_signals_mask (&prev_mask);
3813
3814 if (last_resume_kind == resume_stop
3815 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3816 && WSTOPSIG (status) == SIGSTOP)
3817 {
3818 /* A thread was requested to stop by GDB with target_stop,
3819 and it stopped cleanly, so report it as GDB_SIGNAL_0. The
3820 use of SIGSTOP is an implementation detail. */
3821 ourstatus->value.sig = GDB_SIGNAL_0;
3822 }
3823
3824 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3825 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3826 lp->core = -1;
3827 else
3828 lp->core = linux_common_core_of_thread (lp->ptid);
3829
3830 return lp->ptid;
3831 }
3832
3833 /* Resume LWPs that are currently stopped without any pending status
3834 to report, but are resumed from the core's perspective. */
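/* This is used as an iterate_over_lwps callback from linux_nat_wait
   below (non-stop mode only).  DATA points at the ptid being waited
   on; LWPs outside that set that would immediately re-hit a
   breakpoint are left stopped.  */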
3835
3836 static int
3837 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3838 {
3839 ptid_t *wait_ptid_p = data;
3840
3841 if (lp->stopped
3842 && lp->resumed
3843 && lp->status == 0
3844 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3845 {
3846 struct regcache *regcache = get_thread_regcache (lp->ptid);
3847 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3848 CORE_ADDR pc = regcache_read_pc (regcache);
3849
3850 gdb_assert (is_executing (lp->ptid));
3851
3852 /* Don't bother if there's a breakpoint at PC that we'd hit
3853 immediately, and we're not waiting for this LWP. */
3854 if (!ptid_match (lp->ptid, *wait_ptid_p))
3855 {
3856 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3857 return 0;
3858 }
3859
3860 if (debug_linux_nat)
3861 fprintf_unfiltered (gdb_stdlog,
3862 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3863 target_pid_to_str (lp->ptid),
3864 paddress (gdbarch, pc),
3865 lp->step);
3866
3867 registers_changed ();
3868 if (linux_nat_prepare_to_resume != NULL)
3869 linux_nat_prepare_to_resume (lp);
3870 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3871 lp->step, GDB_SIGNAL_0);
3872 lp->stopped = 0;
3873 lp->stopped_by_watchpoint = 0;
3874 }
3875
3876 return 0;
3877 }
3878
3879 static ptid_t
3880 linux_nat_wait (struct target_ops *ops,
3881 ptid_t ptid, struct target_waitstatus *ourstatus,
3882 int target_options)
3883 {
3884 ptid_t event_ptid;
3885
3886 if (debug_linux_nat)
3887 {
3888 char *options_string;
3889
3890 options_string = target_options_to_string (target_options);
3891 fprintf_unfiltered (gdb_stdlog,
3892 "linux_nat_wait: [%s], [%s]\n",
3893 target_pid_to_str (ptid),
3894 options_string);
3895 xfree (options_string);
3896 }
3897
3898 /* Flush the async file first. */
3899 if (target_can_async_p ())
3900 async_file_flush ();
3901
3902 /* Resume LWPs that are currently stopped without any pending status
3903 to report, but are resumed from the core's perspective. LWPs get
3904 in this state if we find them stopping at a time we're not
3905 interested in reporting the event (target_wait on a
3906 specific_process, for example, see linux_nat_wait_1), and
3907 meanwhile the event became uninteresting. Don't bother resuming
3908 LWPs we're not going to wait for if they'd stop immediately. */
3909 if (non_stop)
3910 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3911
3912 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
3913
3914 /* If we requested any event, and something came out, assume there
3915 may be more. If we requested a specific lwp or process, also
3916 assume there may be more. */
3917 if (target_can_async_p ()
3918 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3919 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
3920 || !ptid_equal (ptid, minus_one_ptid)))
3921 async_file_mark ();
3922
3923 /* Get ready for the next event. */
3924 if (target_can_async_p ())
3925 target_async (inferior_event_handler, 0);
3926
3927 return event_ptid;
3928 }
3929
3930 static int
3931 kill_callback (struct lwp_info *lp, void *data)
3932 {
3933 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3934
3935 errno = 0;
3936 kill (GET_LWP (lp->ptid), SIGKILL);
3937 if (debug_linux_nat)
3938 fprintf_unfiltered (gdb_stdlog,
3939 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3940 target_pid_to_str (lp->ptid),
3941 errno ? safe_strerror (errno) : "OK");
3942
3943 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3944
3945 errno = 0;
3946 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3947 if (debug_linux_nat)
3948 fprintf_unfiltered (gdb_stdlog,
3949 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3950 target_pid_to_str (lp->ptid),
3951 errno ? safe_strerror (errno) : "OK");
3952
3953 return 0;
3954 }
3955
3956 static int
3957 kill_wait_callback (struct lwp_info *lp, void *data)
3958 {
3959 pid_t pid;
3960
3961 /* We must make sure that there are no pending events (delayed
3962 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
3963 program doesn't interfere with any following debugging session. */
3964
3965 /* For cloned processes we must check both with __WCLONE and
3966 without, since the exit status of a cloned process isn't reported
3967 with __WCLONE. */
3968 if (lp->cloned)
3969 {
3970 do
3971 {
3972 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
3973 if (pid != (pid_t) -1)
3974 {
3975 if (debug_linux_nat)
3976 fprintf_unfiltered (gdb_stdlog,
3977 "KWC: wait %s received unknown.\n",
3978 target_pid_to_str (lp->ptid));
3979 /* The Linux kernel sometimes fails to kill a thread
3980 completely after PTRACE_KILL; that goes from the stop
3981 point in do_fork out to the one in
3982 get_signal_to_deliver and waits again. So kill it
3983 again. */
3984 kill_callback (lp, NULL);
3985 }
3986 }
3987 while (pid == GET_LWP (lp->ptid));
3988
3989 gdb_assert (pid == -1 && errno == ECHILD);
3990 }
3991
3992 do
3993 {
3994 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
3995 if (pid != (pid_t) -1)
3996 {
3997 if (debug_linux_nat)
3998 fprintf_unfiltered (gdb_stdlog,
3999 "KWC: wait %s received unk.\n",
4000 target_pid_to_str (lp->ptid));
4001 /* See the call to kill_callback above. */
4002 kill_callback (lp, NULL);
4003 }
4004 }
4005 while (pid == GET_LWP (lp->ptid));
4006
4007 gdb_assert (pid == -1 && errno == ECHILD);
4008 return 0;
4009 }
4010
4011 static void
4012 linux_nat_kill (struct target_ops *ops)
4013 {
4014 struct target_waitstatus last;
4015 ptid_t last_ptid;
4016 int status;
4017
4018 /* If we're stopped while forking and we haven't followed yet,
4019 kill the other task. We need to do this first because the
4020 parent will be sleeping if this is a vfork. */
4021
4022 get_last_target_status (&last_ptid, &last);
4023
4024 if (last.kind == TARGET_WAITKIND_FORKED
4025 || last.kind == TARGET_WAITKIND_VFORKED)
4026 {
4027 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
4028 wait (&status);
4029
4030 /* Let the arch-specific native code know this process is
4031 gone. */
4032 linux_nat_forget_process (PIDGET (last.value.related_pid));
4033 }
4034
4035 if (forks_exist_p ())
4036 linux_fork_killall ();
4037 else
4038 {
4039 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
4040
4041 /* Stop all threads before killing them, since ptrace requires
4042 that the thread is stopped to successfully PTRACE_KILL. */
4043 iterate_over_lwps (ptid, stop_callback, NULL);
4044 /* ... and wait until all of them have reported back that
4045 they're no longer running. */
4046 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4047
4048 /* Kill all LWP's ... */
4049 iterate_over_lwps (ptid, kill_callback, NULL);
4050
4051 /* ... and wait until we've flushed all events. */
4052 iterate_over_lwps (ptid, kill_wait_callback, NULL);
4053 }
4054
4055 target_mourn_inferior ();
4056 }
4057
4058 static void
4059 linux_nat_mourn_inferior (struct target_ops *ops)
4060 {
4061 int pid = ptid_get_pid (inferior_ptid);
4062
4063 purge_lwp_list (pid);
4064
4065 if (! forks_exist_p ())
4066 /* Normal case, no other forks available. */
4067 linux_ops->to_mourn_inferior (ops);
4068 else
4069 /* Multi-fork case. The current inferior_ptid has exited, but
4070 there are other viable forks to debug. Delete the exiting
4071 one and context-switch to the first available. */
4072 linux_fork_mourn_inferior ();
4073
4074 /* Let the arch-specific native code know this process is gone. */
4075 linux_nat_forget_process (pid);
4076 }
4077
4078 /* Convert a native/host siginfo object into/from the siginfo in the
4079 layout of the inferior's architecture. */
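/* DIRECTION == 0 converts the native SIGINFO just obtained with
   PTRACE_GETSIGINFO into the inferior's layout in INF_SIGINFO;
   DIRECTION == 1 converts INF_SIGINFO back into the native layout in
   SIGINFO, prior to flushing it out with PTRACE_SETSIGINFO (see
   linux_xfer_siginfo below).  */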
4080
4081 static void
4082 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
4083 {
4084 int done = 0;
4085
4086 if (linux_nat_siginfo_fixup != NULL)
4087 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4088
4089 /* If there was no callback, or the callback didn't do anything,
4090 then just do a straight memcpy. */
4091 if (!done)
4092 {
4093 if (direction == 1)
4094 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4095 else
4096 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4097 }
4098 }
4099
4100 static LONGEST
4101 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4102 const char *annex, gdb_byte *readbuf,
4103 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4104 {
4105 int pid;
4106 siginfo_t siginfo;
4107 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4108
4109 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4110 gdb_assert (readbuf || writebuf);
4111
4112 pid = GET_LWP (inferior_ptid);
4113 if (pid == 0)
4114 pid = GET_PID (inferior_ptid);
4115
4116 if (offset > sizeof (siginfo))
4117 return -1;
4118
4119 errno = 0;
4120 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4121 if (errno != 0)
4122 return -1;
4123
4124 /* When GDB is built as a 64-bit application, ptrace writes into
4125 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4126 inferior with a 64-bit GDB should look the same as debugging it
4127 with a 32-bit GDB, we need to convert it. GDB core always sees
4128 the converted layout, so any read/write will have to be done
4129 post-conversion. */
4130 siginfo_fixup (&siginfo, inf_siginfo, 0);
4131
4132 if (offset + len > sizeof (siginfo))
4133 len = sizeof (siginfo) - offset;
4134
4135 if (readbuf != NULL)
4136 memcpy (readbuf, inf_siginfo + offset, len);
4137 else
4138 {
4139 memcpy (inf_siginfo + offset, writebuf, len);
4140
4141 /* Convert back to ptrace layout before flushing it out. */
4142 siginfo_fixup (&siginfo, inf_siginfo, 1);
4143
4144 errno = 0;
4145 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4146 if (errno != 0)
4147 return -1;
4148 }
4149
4150 return len;
4151 }
4152
4153 static LONGEST
4154 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4155 const char *annex, gdb_byte *readbuf,
4156 const gdb_byte *writebuf,
4157 ULONGEST offset, LONGEST len)
4158 {
4159 struct cleanup *old_chain;
4160 LONGEST xfer;
4161
4162 if (object == TARGET_OBJECT_SIGNAL_INFO)
4163 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4164 offset, len);
4165
4166 /* The target is connected but no live inferior is selected. Pass
4167 this request down to a lower stratum (e.g., the executable
4168 file). */
4169 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4170 return 0;
4171
4172 old_chain = save_inferior_ptid ();
4173
4174 if (is_lwp (inferior_ptid))
4175 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4176
4177 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4178 offset, len);
4179
4180 do_cleanups (old_chain);
4181 return xfer;
4182 }
4183
4184 static int
4185 linux_thread_alive (ptid_t ptid)
4186 {
4187 int err, tmp_errno;
4188
4189 gdb_assert (is_lwp (ptid));
4190
4191 /* Send signal 0 instead of using ptrace, because ptracing a
4192 running thread errors out claiming that the thread doesn't
4193 exist. */
4194 err = kill_lwp (GET_LWP (ptid), 0);
4195 tmp_errno = errno;
4196 if (debug_linux_nat)
4197 fprintf_unfiltered (gdb_stdlog,
4198 "LLTA: KILL(SIG0) %s (%s)\n",
4199 target_pid_to_str (ptid),
4200 err ? safe_strerror (tmp_errno) : "OK");
4201
4202 if (err != 0)
4203 return 0;
4204
4205 return 1;
4206 }
4207
4208 static int
4209 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4210 {
4211 return linux_thread_alive (ptid);
4212 }
4213
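/* Implement the to_pid_to_str target method.  The result points to a
   static buffer, so it is only valid until the next call.  */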
4214 static char *
4215 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
4216 {
4217 static char buf[64];
4218
4219 if (is_lwp (ptid)
4220 && (GET_PID (ptid) != GET_LWP (ptid)
4221 || num_lwps (GET_PID (ptid)) > 1))
4222 {
4223 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4224 return buf;
4225 }
4226
4227 return normal_pid_to_str (ptid);
4228 }
4229
4230 static char *
4231 linux_nat_thread_name (struct thread_info *thr)
4232 {
4233 int pid = ptid_get_pid (thr->ptid);
4234 long lwp = ptid_get_lwp (thr->ptid);
4235 #define FORMAT "/proc/%d/task/%ld/comm"
4236 char buf[sizeof (FORMAT) + 30];
4237 FILE *comm_file;
4238 char *result = NULL;
4239
4240 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4241 comm_file = gdb_fopen_cloexec (buf, "r");
4242 if (comm_file)
4243 {
4244 /* Not exported by the kernel, so we define it here. */
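/* (The kernel's TASK_COMM_LEN is 16 counting the trailing NUL, so
   thread names read here are at most 15 characters.)  */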
4245 #define COMM_LEN 16
4246 static char line[COMM_LEN + 1];
4247
4248 if (fgets (line, sizeof (line), comm_file))
4249 {
4250 char *nl = strchr (line, '\n');
4251
4252 if (nl)
4253 *nl = '\0';
4254 if (*line != '\0')
4255 result = line;
4256 }
4257
4258 fclose (comm_file);
4259 }
4260
4261 #undef COMM_LEN
4262 #undef FORMAT
4263
4264 return result;
4265 }
4266
4267 /* Accepts an integer PID; returns a string representing a file that
4268 can be opened to get the symbols for the child process. */
4269
4270 static char *
4271 linux_child_pid_to_exec_file (int pid)
4272 {
4273 char *name1, *name2;
4274
4275 name1 = xmalloc (PATH_MAX);
4276 name2 = xmalloc (PATH_MAX);
4277 make_cleanup (xfree, name1);
4278 make_cleanup (xfree, name2);
4279 memset (name2, 0, PATH_MAX);
4280
4281 sprintf (name1, "/proc/%d/exe", pid);
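/* readlink does not NUL-terminate its result, which is why NAME2 was
   zeroed above and the read is capped at PATH_MAX - 1.  */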
4282 if (readlink (name1, name2, PATH_MAX - 1) > 0)
4283 return name2;
4284 else
4285 return name1;
4286 }
4287
4288 /* Records the thread's register state for the corefile note
4289 section. */
4290
4291 static char *
4292 linux_nat_collect_thread_registers (const struct regcache *regcache,
4293 ptid_t ptid, bfd *obfd,
4294 char *note_data, int *note_size,
4295 enum gdb_signal stop_signal)
4296 {
4297 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4298 const struct regset *regset;
4299 int core_regset_p;
4300 gdb_gregset_t gregs;
4301 gdb_fpregset_t fpregs;
4302
4303 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
4304
4305 if (core_regset_p
4306 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4307 sizeof (gregs)))
4308 != NULL && regset->collect_regset != NULL)
4309 regset->collect_regset (regset, regcache, -1, &gregs, sizeof (gregs));
4310 else
4311 fill_gregset (regcache, &gregs, -1);
4312
4313 note_data = (char *) elfcore_write_prstatus
4314 (obfd, note_data, note_size, ptid_get_lwp (ptid),
4315 gdb_signal_to_host (stop_signal), &gregs);
4316
4317 if (core_regset_p
4318 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4319 sizeof (fpregs)))
4320 != NULL && regset->collect_regset != NULL)
4321 regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs));
4322 else
4323 fill_fpregset (regcache, &fpregs, -1);
4324
4325 note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size,
4326 &fpregs, sizeof (fpregs));
4327
4328 return note_data;
4329 }
4330
4331 /* Fills the "to_make_corefile_note" target vector. Builds the note
4332 section for a corefile, and returns it in a malloc buffer. */
4333
4334 static char *
4335 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4336 {
4337 /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
4338 converted to gdbarch_core_regset_sections, this function can go away. */
4339 return linux_make_corefile_notes (target_gdbarch (), obfd, note_size,
4340 linux_nat_collect_thread_registers);
4341 }
4342
4343 /* Implement the to_xfer_partial interface for memory reads using the /proc
4344 filesystem. Because we can use a single read() call for /proc, this
4345 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4346 but it doesn't support writes. */
4347
4348 static LONGEST
4349 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4350 const char *annex, gdb_byte *readbuf,
4351 const gdb_byte *writebuf,
4352 ULONGEST offset, LONGEST len)
4353 {
4354 LONGEST ret;
4355 int fd;
4356 char filename[64];
4357
4358 if (object != TARGET_OBJECT_MEMORY || !readbuf)
4359 return 0;
4360
4361 /* Don't bother for short transfers of less than three words. */
4362 if (len < 3 * sizeof (long))
4363 return 0;
4364
4365 /* We could keep this file open and cache it - possibly one per
4366 thread. That requires some juggling, but is even faster. */
4367 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4368 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
4369 if (fd == -1)
4370 return 0;
4371
4372 /* If pread64 is available, use it. It's faster if the kernel
4373 supports it (only one syscall), and it's 64-bit safe even on
4374 32-bit platforms (for instance, SPARC debugging a SPARC64
4375 application). */
4376 #ifdef HAVE_PREAD64
4377 if (pread64 (fd, readbuf, len, offset) != len)
4378 #else
4379 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4380 #endif
4381 ret = 0;
4382 else
4383 ret = len;
4384
4385 close (fd);
4386 return ret;
4387 }
4388
4389
4390 /* Enumerate spufs IDs for process PID. */
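/* The result is a sequence of 4-byte SPU context IDs in target byte
   order, one per spufs file descriptor of the process, windowed by
   OFFSET and LEN.  */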
4391 static LONGEST
4392 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4393 {
4394 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
4395 LONGEST pos = 0;
4396 LONGEST written = 0;
4397 char path[128];
4398 DIR *dir;
4399 struct dirent *entry;
4400
4401 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4402 dir = opendir (path);
4403 if (!dir)
4404 return -1;
4405
4406 rewinddir (dir);
4407 while ((entry = readdir (dir)) != NULL)
4408 {
4409 struct stat st;
4410 struct statfs stfs;
4411 int fd;
4412
4413 fd = atoi (entry->d_name);
4414 if (!fd)
4415 continue;
4416
4417 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4418 if (stat (path, &st) != 0)
4419 continue;
4420 if (!S_ISDIR (st.st_mode))
4421 continue;
4422
4423 if (statfs (path, &stfs) != 0)
4424 continue;
4425 if (stfs.f_type != SPUFS_MAGIC)
4426 continue;
4427
4428 if (pos >= offset && pos + 4 <= offset + len)
4429 {
4430 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4431 written += 4;
4432 }
4433 pos += 4;
4434 }
4435
4436 closedir (dir);
4437 return written;
4438 }
4439
4440 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4441 object type, using the /proc file system. */
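/* A NULL ANNEX requests the list of SPU context IDs (see
   spu_enumerate_spu_ids above).  Otherwise ANNEX names a file within
   a context relative to /proc/PID/fd -- illustratively, an annex of
   "5/regs" would address the "regs" file of the context whose ID is
   5.  */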
4442 static LONGEST
4443 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4444 const char *annex, gdb_byte *readbuf,
4445 const gdb_byte *writebuf,
4446 ULONGEST offset, LONGEST len)
4447 {
4448 char buf[128];
4449 int fd = 0;
4450 int ret = -1;
4451 int pid = PIDGET (inferior_ptid);
4452
4453 if (!annex)
4454 {
4455 if (!readbuf)
4456 return -1;
4457 else
4458 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4459 }
4460
4461 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4462 fd = gdb_open_cloexec (buf, writebuf ? O_WRONLY : O_RDONLY, 0);
4463 if (fd <= 0)
4464 return -1;
4465
4466 if (offset != 0
4467 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4468 {
4469 close (fd);
4470 return 0;
4471 }
4472
4473 if (writebuf)
4474 ret = write (fd, writebuf, (size_t) len);
4475 else if (readbuf)
4476 ret = read (fd, readbuf, (size_t) len);
4477
4478 close (fd);
4479 return ret;
4480 }
4481
4482
4483 /* Parse LINE as a signal set and add its set bits to SIGS. */
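/* For example, given the line "0000000000000002\n" (what follows the
   "SigPnd:\t" prefix in /proc/PID/status), LEN is 16 and SIGNUM
   starts at 64; by the time the final '2' is processed SIGNUM has
   dropped to 0, and its bit 1 (value 2) adds signal 2 (SIGINT) to
   SIGS.  */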
4484
4485 static void
4486 add_line_to_sigset (const char *line, sigset_t *sigs)
4487 {
4488 int len = strlen (line) - 1;
4489 const char *p;
4490 int signum;
4491
4492 if (line[len] != '\n')
4493 error (_("Could not parse signal set: %s"), line);
4494
4495 p = line;
4496 signum = len * 4;
4497 while (len-- > 0)
4498 {
4499 int digit;
4500
4501 if (*p >= '0' && *p <= '9')
4502 digit = *p - '0';
4503 else if (*p >= 'a' && *p <= 'f')
4504 digit = *p - 'a' + 10;
4505 else
4506 error (_("Could not parse signal set: %s"), line);
4507
4508 signum -= 4;
4509
4510 if (digit & 1)
4511 sigaddset (sigs, signum + 1);
4512 if (digit & 2)
4513 sigaddset (sigs, signum + 2);
4514 if (digit & 4)
4515 sigaddset (sigs, signum + 3);
4516 if (digit & 8)
4517 sigaddset (sigs, signum + 4);
4518
4519 p++;
4520 }
4521 }
4522
4523 /* Find process PID's pending signals from /proc/pid/status and set
4524 SIGS to match. */
4525
4526 void
4527 linux_proc_pending_signals (int pid, sigset_t *pending,
4528 sigset_t *blocked, sigset_t *ignored)
4529 {
4530 FILE *procfile;
4531 char buffer[PATH_MAX], fname[PATH_MAX];
4532 struct cleanup *cleanup;
4533
4534 sigemptyset (pending);
4535 sigemptyset (blocked);
4536 sigemptyset (ignored);
4537 sprintf (fname, "/proc/%d/status", pid);
4538 procfile = gdb_fopen_cloexec (fname, "r");
4539 if (procfile == NULL)
4540 error (_("Could not open %s"), fname);
4541 cleanup = make_cleanup_fclose (procfile);
4542
4543 while (fgets (buffer, PATH_MAX, procfile) != NULL)
4544 {
4545 /* Normal queued signals are on the SigPnd line in the status
4546 file. However, 2.6 kernels also have a "shared" pending
4547 queue for delivering signals to a thread group, so check for
4548 a ShdPnd line also.
4549
4550 Unfortunately some Red Hat kernels include the shared pending
4551 queue but not the ShdPnd status field. */
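/* Illustratively, the status lines of interest look like:

   SigPnd: 0000000000000000
   ShdPnd: 0000000000000100
   SigBlk: 0000000000010000
   SigIgn: 0000000001001206

   i.e. 64-bit hex masks in which bit N-1 set means signal N is a
   member of the set.  */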
4552
4553 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4554 add_line_to_sigset (buffer + 8, pending);
4555 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4556 add_line_to_sigset (buffer + 8, pending);
4557 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4558 add_line_to_sigset (buffer + 8, blocked);
4559 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4560 add_line_to_sigset (buffer + 8, ignored);
4561 }
4562
4563 do_cleanups (cleanup);
4564 }
4565
4566 static LONGEST
4567 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4568 const char *annex, gdb_byte *readbuf,
4569 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4570 {
4571 gdb_assert (object == TARGET_OBJECT_OSDATA);
4572
4573 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4574 }
4575
4576 static LONGEST
4577 linux_xfer_partial (struct target_ops *ops, enum target_object object,
4578 const char *annex, gdb_byte *readbuf,
4579 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4580 {
4581 LONGEST xfer;
4582
4583 if (object == TARGET_OBJECT_AUXV)
4584 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
4585 offset, len);
4586
4587 if (object == TARGET_OBJECT_OSDATA)
4588 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4589 offset, len);
4590
4591 if (object == TARGET_OBJECT_SPU)
4592 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4593 offset, len);
4594
4595 /* GDB calculates all the addresses in the possibly larger width of the
4596 address type. The address needs to be masked before its final use -
4597 either by linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4598
4599 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4600
4601 if (object == TARGET_OBJECT_MEMORY)
4602 {
4603 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
4604
4605 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4606 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4607 }
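/* For instance, when a 64-bit GDB debugs a 32-bit inferior, ADDR_BIT
   is 32, so a sign-extended OFFSET of 0xffffffff80001000 is masked
   down to 0x80001000 here.  */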
4608
4609 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4610 offset, len);
4611 if (xfer != 0)
4612 return xfer;
4613
4614 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4615 offset, len);
4616 }
4617
4618 static void
4619 cleanup_target_stop (void *arg)
4620 {
4621 ptid_t *ptid = (ptid_t *) arg;
4622
4623 gdb_assert (arg != NULL);
4624
4625 /* Unpause all */
4626 target_resume (*ptid, 0, GDB_SIGNAL_0);
4627 }
4628
4629 static VEC(static_tracepoint_marker_p) *
4630 linux_child_static_tracepoint_markers_by_strid (const char *strid)
4631 {
4632 char s[IPA_CMD_BUF_SIZE];
4633 struct cleanup *old_chain;
4634 int pid = ptid_get_pid (inferior_ptid);
4635 VEC(static_tracepoint_marker_p) *markers = NULL;
4636 struct static_tracepoint_marker *marker = NULL;
4637 char *p = s;
4638 ptid_t ptid = ptid_build (pid, 0, 0);
4639
4640 /* Pause all */
4641 target_stop (ptid);
4642
4643 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4644 s[sizeof ("qTfSTM")] = 0;
4645
4646 agent_run_command (pid, s, strlen (s) + 1);
4647
4648 old_chain = make_cleanup (free_current_marker, &marker);
4649 make_cleanup (cleanup_target_stop, &ptid);
4650
4651 while (*p++ == 'm')
4652 {
4653 if (marker == NULL)
4654 marker = XCNEW (struct static_tracepoint_marker);
4655
4656 do
4657 {
4658 parse_static_tracepoint_marker_definition (p, &p, marker);
4659
4660 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4661 {
4662 VEC_safe_push (static_tracepoint_marker_p,
4663 markers, marker);
4664 marker = NULL;
4665 }
4666 else
4667 {
4668 release_static_tracepoint_marker (marker);
4669 memset (marker, 0, sizeof (*marker));
4670 }
4671 }
4672 while (*p++ == ','); /* comma-separated list */
4673
4674 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4675 s[sizeof ("qTsSTM")] = 0;
4676 agent_run_command (pid, s, strlen (s) + 1);
4677 p = s;
4678 }
4679
4680 do_cleanups (old_chain);
4681
4682 return markers;
4683 }
4684
4685 /* Create a prototype generic GNU/Linux target. The client can override
4686 it with local methods. */
4687
4688 static void
4689 linux_target_install_ops (struct target_ops *t)
4690 {
4691 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4692 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
4693 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4694 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
4695 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4696 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
4697 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
4698 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
4699 t->to_post_startup_inferior = linux_child_post_startup_inferior;
4700 t->to_post_attach = linux_child_post_attach;
4701 t->to_follow_fork = linux_child_follow_fork;
4702 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4703
4704 super_xfer_partial = t->to_xfer_partial;
4705 t->to_xfer_partial = linux_xfer_partial;
4706
4707 t->to_static_tracepoint_markers_by_strid
4708 = linux_child_static_tracepoint_markers_by_strid;
4709 }
4710
4711 struct target_ops *
4712 linux_target (void)
4713 {
4714 struct target_ops *t;
4715
4716 t = inf_ptrace_target ();
4717 linux_target_install_ops (t);
4718
4719 return t;
4720 }
4721
4722 struct target_ops *
4723 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4724 {
4725 struct target_ops *t;
4726
4727 t = inf_ptrace_trad_target (register_u_offset);
4728 linux_target_install_ops (t);
4729
4730 return t;
4731 }
4732
4733 /* target_is_async_p implementation. */
4734
4735 static int
4736 linux_nat_is_async_p (void)
4737 {
4738 /* NOTE: palves 2008-03-21: We're only async when the user requests
4739 it explicitly with the "set target-async" command.
4740 Someday, linux will always be async. */
4741 return target_async_permitted;
4742 }
4743
4744 /* target_can_async_p implementation. */
4745
4746 static int
4747 linux_nat_can_async_p (void)
4748 {
4749 /* NOTE: palves 2008-03-21: We're only async when the user requests
4750 it explicitly with the "set target-async" command.
4751 Someday, linux will always be async. */
4752 return target_async_permitted;
4753 }
4754
4755 static int
4756 linux_nat_supports_non_stop (void)
4757 {
4758 return 1;
4759 }
4760
4761 /* True if we want to support multi-process. To be removed when GDB
4762 supports multi-exec. */
4763
4764 int linux_multi_process = 1;
4765
4766 static int
4767 linux_nat_supports_multi_process (void)
4768 {
4769 return linux_multi_process;
4770 }
4771
4772 static int
4773 linux_nat_supports_disable_randomization (void)
4774 {
4775 #ifdef HAVE_PERSONALITY
4776 return 1;
4777 #else
4778 return 0;
4779 #endif
4780 }
4781
4782 static int async_terminal_is_ours = 1;
4783
4784 /* target_terminal_inferior implementation. */
4785
4786 static void
4787 linux_nat_terminal_inferior (void)
4788 {
4789 if (!target_is_async_p ())
4790 {
4791 /* Async mode is disabled. */
4792 terminal_inferior ();
4793 return;
4794 }
4795
4796 terminal_inferior ();
4797
4798 /* Calls to target_terminal_*() are meant to be idempotent. */
4799 if (!async_terminal_is_ours)
4800 return;
4801
4802 delete_file_handler (input_fd);
4803 async_terminal_is_ours = 0;
4804 set_sigint_trap ();
4805 }
4806
4807 /* target_terminal_ours implementation. */
4808
4809 static void
4810 linux_nat_terminal_ours (void)
4811 {
4812 if (!target_is_async_p ())
4813 {
4814 /* Async mode is disabled. */
4815 terminal_ours ();
4816 return;
4817 }
4818
4819 /* GDB should never give the terminal to the inferior if the
4820 inferior is running in the background (run&, continue&, etc.),
4821 but claiming it back for GDB is always fine. */
4822 terminal_ours ();
4823
4824 if (async_terminal_is_ours)
4825 return;
4826
4827 clear_sigint_trap ();
4828 add_file_handler (input_fd, stdin_event_handler, 0);
4829 async_terminal_is_ours = 1;
4830 }
4831
4832 static void (*async_client_callback) (enum inferior_event_type event_type,
4833 void *context);
4834 static void *async_client_context;
4835
4836 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4837 it notifies the event loop when any child changes state; in sync
4838 mode, it allows linux_nat_wait_1 above to use sigsuspend to wait
4839 for the arrival of a SIGCHLD. */
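/* This is the classic self-pipe trick: the handler restricts itself
   to async-signal-safe work (writing a byte to the event pipe via
   async_file_mark), and the event loop wakes up on the pipe's read
   end (see handle_target_event below).  */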
4840
4841 static void
4842 sigchld_handler (int signo)
4843 {
4844 int old_errno = errno;
4845
4846 if (debug_linux_nat)
4847 ui_file_write_async_safe (gdb_stdlog,
4848 "sigchld\n", sizeof ("sigchld\n") - 1);
4849
4850 if (signo == SIGCHLD
4851 && linux_nat_event_pipe[0] != -1)
4852 async_file_mark (); /* Let the event loop know that there are
4853 events to handle. */
4854
4855 errno = old_errno;
4856 }
4857
4858 /* Callback registered with the target events file descriptor. */
4859
4860 static void
4861 handle_target_event (int error, gdb_client_data client_data)
4862 {
4863 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4864 }
4865
4866 /* Create/destroy the target events pipe. Returns previous state. */
4867
4868 static int
4869 linux_async_pipe (int enable)
4870 {
4871 int previous = (linux_nat_event_pipe[0] != -1);
4872
4873 if (previous != enable)
4874 {
4875 sigset_t prev_mask;
4876
4877 /* Block child signals while we create/destroy the pipe, as
4878 their handler writes to it. */
4879 block_child_signals (&prev_mask);
4880
4881 if (enable)
4882 {
4883 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
4884 internal_error (__FILE__, __LINE__,
4885 "creating event pipe failed.");
4886
4887 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4888 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4889 }
4890 else
4891 {
4892 close (linux_nat_event_pipe[0]);
4893 close (linux_nat_event_pipe[1]);
4894 linux_nat_event_pipe[0] = -1;
4895 linux_nat_event_pipe[1] = -1;
4896 }
4897
4898 restore_child_signals_mask (&prev_mask);
4899 }
4900
4901 return previous;
4902 }
4903
4904 /* target_async implementation. */
4905
4906 static void
4907 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4908 void *context), void *context)
4909 {
4910 if (callback != NULL)
4911 {
4912 async_client_callback = callback;
4913 async_client_context = context;
4914 if (!linux_async_pipe (1))
4915 {
4916 add_file_handler (linux_nat_event_pipe[0],
4917 handle_target_event, NULL);
4918 /* There may be pending events to handle. Tell the event loop
4919 to poll them. */
4920 async_file_mark ();
4921 }
4922 }
4923 else
4924 {
4925 async_client_callback = callback;
4926 async_client_context = context;
4927 delete_file_handler (linux_nat_event_pipe[0]);
4928 linux_async_pipe (0);
4929 }
4930 return;
4931 }
4932
4933 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4934 event came out. */
4935
4936 static int
4937 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4938 {
4939 if (!lwp->stopped)
4940 {
4941 if (debug_linux_nat)
4942 fprintf_unfiltered (gdb_stdlog,
4943 "LNSL: running -> suspending %s\n",
4944 target_pid_to_str (lwp->ptid));
4945
4946
4947 if (lwp->last_resume_kind == resume_stop)
4948 {
4949 if (debug_linux_nat)
4950 fprintf_unfiltered (gdb_stdlog,
4951 "linux-nat: already stopping LWP %ld at "
4952 "GDB's request\n",
4953 ptid_get_lwp (lwp->ptid));
4954 return 0;
4955 }
4956
4957 stop_callback (lwp, NULL);
4958 lwp->last_resume_kind = resume_stop;
4959 }
4960 else
4961 {
4962 /* Already known to be stopped; do nothing. */
4963
4964 if (debug_linux_nat)
4965 {
4966 if (find_thread_ptid (lwp->ptid)->stop_requested)
4967 fprintf_unfiltered (gdb_stdlog,
4968 "LNSL: already stopped/stop_requested %s\n",
4969 target_pid_to_str (lwp->ptid));
4970 else
4971 fprintf_unfiltered (gdb_stdlog,
4972 "LNSL: already stopped/no "
4973 "stop_requested yet %s\n",
4974 target_pid_to_str (lwp->ptid));
4975 }
4976 }
4977 return 0;
4978 }
4979
4980 static void
4981 linux_nat_stop (ptid_t ptid)
4982 {
4983 if (non_stop)
4984 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4985 else
4986 linux_ops->to_stop (ptid);
4987 }
4988
4989 static void
4990 linux_nat_close (void)
4991 {
4992 /* Unregister from the event loop. */
4993 if (linux_nat_is_async_p ())
4994 linux_nat_async (NULL, 0);
4995
4996 if (linux_ops->to_close)
4997 linux_ops->to_close ();
4998 }
4999
5000 /* When requests are passed down from the linux-nat layer to the
5001 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5002 used. The address space pointer is stored in the inferior object,
5003 but the common code that is passed such ptid can't tell whether
5004 lwpid is a "main" process id or not (it assumes so). We reverse
5005 look up the "main" process id from the lwp here. */
5006
5007 static struct address_space *
5008 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5009 {
5010 struct lwp_info *lwp;
5011 struct inferior *inf;
5012 int pid;
5013
5014 pid = GET_LWP (ptid);
5015 if (GET_LWP (ptid) == 0)
5016 {
5017 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5018 tgid. */
5019 lwp = find_lwp_pid (ptid);
5020 pid = GET_PID (lwp->ptid);
5021 }
5022 else
5023 {
5024 /* A (pid,lwpid,0) ptid. */
5025 pid = GET_PID (ptid);
5026 }
5027
5028 inf = find_inferior_pid (pid);
5029 gdb_assert (inf != NULL);
5030 return inf->aspace;
5031 }
5032
5033 /* Return the cached value of the processor core for thread PTID. */
5034
5035 static int
5036 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5037 {
5038 struct lwp_info *info = find_lwp_pid (ptid);
5039
5040 if (info)
5041 return info->core;
5042 return -1;
5043 }
5044
5045 void
5046 linux_nat_add_target (struct target_ops *t)
5047 {
5048 /* Save the provided single-threaded target. We save this in a separate
5049 variable because another target we've inherited from (e.g. inf-ptrace)
5050 may have saved a pointer to T; we want to use it for the final
5051 process stratum target. */
5052 linux_ops_saved = *t;
5053 linux_ops = &linux_ops_saved;
5054
5055 /* Override some methods for multithreading. */
5056 t->to_create_inferior = linux_nat_create_inferior;
5057 t->to_attach = linux_nat_attach;
5058 t->to_detach = linux_nat_detach;
5059 t->to_resume = linux_nat_resume;
5060 t->to_wait = linux_nat_wait;
5061 t->to_pass_signals = linux_nat_pass_signals;
5062 t->to_xfer_partial = linux_nat_xfer_partial;
5063 t->to_kill = linux_nat_kill;
5064 t->to_mourn_inferior = linux_nat_mourn_inferior;
5065 t->to_thread_alive = linux_nat_thread_alive;
5066 t->to_pid_to_str = linux_nat_pid_to_str;
5067 t->to_thread_name = linux_nat_thread_name;
5068 t->to_has_thread_control = tc_schedlock;
5069 t->to_thread_address_space = linux_nat_thread_address_space;
5070 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5071 t->to_stopped_data_address = linux_nat_stopped_data_address;
5072
5073 t->to_can_async_p = linux_nat_can_async_p;
5074 t->to_is_async_p = linux_nat_is_async_p;
5075 t->to_supports_non_stop = linux_nat_supports_non_stop;
5076 t->to_async = linux_nat_async;
5077 t->to_terminal_inferior = linux_nat_terminal_inferior;
5078 t->to_terminal_ours = linux_nat_terminal_ours;
5079 t->to_close = linux_nat_close;
5080
5081 /* Methods for non-stop support. */
5082 t->to_stop = linux_nat_stop;
5083
5084 t->to_supports_multi_process = linux_nat_supports_multi_process;
5085
5086 t->to_supports_disable_randomization
5087 = linux_nat_supports_disable_randomization;
5088
5089 t->to_core_of_thread = linux_nat_core_of_thread;
5090
5091 /* We don't change the stratum; this target will sit at
5092 process_stratum and thread_db will sit at thread_stratum. This
5093 is a little strange, since this is a multi-threaded-capable
5094 target, but we want to be on the stack below thread_db, and we
5095 also want to be used for single-threaded processes. */
5096
5097 add_target (t);
5098 }
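/* Illustratively, an architecture-specific native file's
   _initialize routine does roughly the following (a sketch, not a
   quote of any particular file; my_fetch_registers is a hypothetical
   arch method):

     struct target_ops *t = linux_target ();
     t->to_fetch_registers = my_fetch_registers;
     linux_nat_add_target (t);
*/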
5099
5100 /* Register a method to call whenever a new thread is attached. */
5101 void
5102 linux_nat_set_new_thread (struct target_ops *t,
5103 void (*new_thread) (struct lwp_info *))
5104 {
5105 /* Save the pointer. We only support a single registered instance
5106 of the GNU/Linux native target, so we do not need to map this to
5107 T. */
5108 linux_nat_new_thread = new_thread;
5109 }
5110
5111 /* See declaration in linux-nat.h. */
5112
5113 void
5114 linux_nat_set_new_fork (struct target_ops *t,
5115 linux_nat_new_fork_ftype *new_fork)
5116 {
5117 /* Save the pointer. */
5118 linux_nat_new_fork = new_fork;
5119 }
5120
5121 /* See declaration in linux-nat.h. */
5122
5123 void
5124 linux_nat_set_forget_process (struct target_ops *t,
5125 linux_nat_forget_process_ftype *fn)
5126 {
5127 /* Save the pointer. */
5128 linux_nat_forget_process_hook = fn;
5129 }
5130
5131 /* See declaration in linux-nat.h. */
5132
5133 void
5134 linux_nat_forget_process (pid_t pid)
5135 {
5136 if (linux_nat_forget_process_hook != NULL)
5137 linux_nat_forget_process_hook (pid);
5138 }
5139
5140 /* Register a method that converts a siginfo object between the layout
5141 that ptrace returns, and the layout in the architecture of the
5142 inferior. */
5143 void
5144 linux_nat_set_siginfo_fixup (struct target_ops *t,
5145 int (*siginfo_fixup) (siginfo_t *,
5146 gdb_byte *,
5147 int))
5148 {
5149 /* Save the pointer. */
5150 linux_nat_siginfo_fixup = siginfo_fixup;
5151 }
5152
5153 /* Register a method to call prior to resuming a thread. */
5154
5155 void
5156 linux_nat_set_prepare_to_resume (struct target_ops *t,
5157 void (*prepare_to_resume) (struct lwp_info *))
5158 {
5159 /* Save the pointer. */
5160 linux_nat_prepare_to_resume = prepare_to_resume;
5161 }
5162
5163 /* See linux-nat.h. */
5164
5165 int
5166 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
5167 {
5168 int pid;
5169
5170 pid = GET_LWP (ptid);
5171 if (pid == 0)
5172 pid = GET_PID (ptid);
5173
5174 errno = 0;
5175 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
5176 if (errno != 0)
5177 {
5178 memset (siginfo, 0, sizeof (*siginfo));
5179 return 0;
5180 }
5181 return 1;
5182 }
5183
5184 /* Provide a prototype to silence -Wmissing-prototypes. */
5185 extern initialize_file_ftype _initialize_linux_nat;
5186
5187 void
5188 _initialize_linux_nat (void)
5189 {
5190 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
5191 &debug_linux_nat, _("\
5192 Set debugging of GNU/Linux lwp module."), _("\
5193 Show debugging of GNU/Linux lwp module."), _("\
5194 Enables printf debugging output."),
5195 NULL,
5196 show_debug_linux_nat,
5197 &setdebuglist, &showdebuglist);
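/* The above registers "set debug lin-lwp" and "show debug lin-lwp";
   e.g. "(gdb) set debug lin-lwp 1" enables the debug traces used
   throughout this file.  */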
5198
5199 /* Save this mask as the default. */
5200 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5201
5202 /* Install a SIGCHLD handler. */
5203 sigchld_action.sa_handler = sigchld_handler;
5204 sigemptyset (&sigchld_action.sa_mask);
5205 sigchld_action.sa_flags = SA_RESTART;
5206
5207 /* Make it the default. */
5208 sigaction (SIGCHLD, &sigchld_action, NULL);
5209
5210 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5211 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5212 sigdelset (&suspend_mask, SIGCHLD);
5213
5214 sigemptyset (&blocked_mask);
5215 }
5216 \f
5217
5218 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5219 the GNU/Linux Threads library and therefore doesn't really belong
5220 here. */
5221
5222 /* Read variable NAME in the target and return its value if found.
5223 Otherwise return zero. It is assumed that the type of the variable
5224 is `int'. */
5225
5226 static int
5227 get_signo (const char *name)
5228 {
5229 struct minimal_symbol *ms;
5230 int signo;
5231
5232 ms = lookup_minimal_symbol (name, NULL, NULL);
5233 if (ms == NULL)
5234 return 0;
5235
5236 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
5237 sizeof (signo)) != 0)
5238 return 0;
5239
5240 return signo;
5241 }
5242
5243 /* Return the set of signals used by the threads library in *SET. */
5244
5245 void
5246 lin_thread_get_thread_signals (sigset_t *set)
5247 {
5248 struct sigaction action;
5249 int restart, cancel;
5250
5251 sigemptyset (&blocked_mask);
5252 sigemptyset (set);
5253
5254 restart = get_signo ("__pthread_sig_restart");
5255 cancel = get_signo ("__pthread_sig_cancel");
5256
5257 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5258 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5259 not provide any way for the debugger to query the signal numbers -
5260 fortunately they don't change! */
5261
5262 if (restart == 0)
5263 restart = __SIGRTMIN;
5264
5265 if (cancel == 0)
5266 cancel = __SIGRTMIN + 1;
5267
5268 sigaddset (set, restart);
5269 sigaddset (set, cancel);
5270
5271 /* The GNU/Linux Threads library makes terminating threads send a
5272 special "cancel" signal instead of SIGCHLD. Make sure we catch
5273 those (to prevent them from terminating GDB itself, which is
5274 likely to be their default action) and treat them the same way as
5275 SIGCHLD. */
5276
5277 action.sa_handler = sigchld_handler;
5278 sigemptyset (&action.sa_mask);
5279 action.sa_flags = SA_RESTART;
5280 sigaction (cancel, &action, NULL);
5281
5282 /* We block the "cancel" signal throughout this code ... */
5283 sigaddset (&blocked_mask, cancel);
5284 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5285
5286 /* ... except during a sigsuspend. */
5287 sigdelset (&suspend_mask, cancel);
5288 }