1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street, Fifth Floor,
20 Boston, MA 02110-1301, USA. */
21
22 #include "defs.h"
23 #include "inferior.h"
24 #include "target.h"
25 #include "gdb_string.h"
26 #include "gdb_wait.h"
27 #include "gdb_assert.h"
28 #ifdef HAVE_TKILL_SYSCALL
29 #include <unistd.h>
30 #include <sys/syscall.h>
31 #endif
32 #include <sys/ptrace.h>
33 #include "linux-nat.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
36 #include "gdbcmd.h"
37 #include "regcache.h"
38 #include "inf-ptrace.h"
39 #include "auxv.h"
40 #include <sys/param.h> /* for MAXPATHLEN */
41 #include <sys/procfs.h> /* for elf_gregset etc. */
42 #include "elf-bfd.h" /* for elfcore_write_* */
43 #include "gregset.h" /* for gregset */
44 #include "gdbcore.h" /* for get_exec_file */
45 #include <ctype.h> /* for isdigit */
46 #include "gdbthread.h" /* for struct thread_info etc. */
47 #include "gdb_stat.h" /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
49
50 #ifndef O_LARGEFILE
51 #define O_LARGEFILE 0
52 #endif
53
54 /* If the system headers did not provide the constants, hard-code the normal
55 values. */
56 #ifndef PTRACE_EVENT_FORK
57
58 #define PTRACE_SETOPTIONS 0x4200
59 #define PTRACE_GETEVENTMSG 0x4201
60
61 /* options set using PTRACE_SETOPTIONS */
62 #define PTRACE_O_TRACESYSGOOD 0x00000001
63 #define PTRACE_O_TRACEFORK 0x00000002
64 #define PTRACE_O_TRACEVFORK 0x00000004
65 #define PTRACE_O_TRACECLONE 0x00000008
66 #define PTRACE_O_TRACEEXEC 0x00000010
67 #define PTRACE_O_TRACEVFORKDONE 0x00000020
68 #define PTRACE_O_TRACEEXIT 0x00000040
69
70 /* Wait extended result codes for the above trace options. */
71 #define PTRACE_EVENT_FORK 1
72 #define PTRACE_EVENT_VFORK 2
73 #define PTRACE_EVENT_CLONE 3
74 #define PTRACE_EVENT_EXEC 4
75 #define PTRACE_EVENT_VFORK_DONE 5
76 #define PTRACE_EVENT_EXIT 6
77
78 #endif /* PTRACE_EVENT_FORK */
79
80 /* We can't always assume that this flag is available, but all systems
81 with the ptrace event handlers also have __WALL, so it's safe to use
82 here. */
83 #ifndef __WALL
84 #define __WALL 0x40000000 /* Wait for any child. */
85 #endif
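/* The following is an illustrative sketch only; it is not called
   anywhere in GDB and the function name is invented.  It shows how
   the PTRACE_O_* options and PTRACE_EVENT_* codes above fit together:
   once the options are set, the kernel reports the event as a SIGTRAP
   stop with the event number stored in bits 16 and up of the wait
   status.  PID is assumed to be a ptrace child that is currently
   stopped.  */

static int
example_decode_ptrace_event (int pid)
{
  int status;

  /* Ask the kernel to report fork and clone events for PID.  */
  ptrace (PTRACE_SETOPTIONS, pid, 0,
          PTRACE_O_TRACEFORK | PTRACE_O_TRACECLONE);

  /* Let PID run; the next stop that carries an event looks like a
     SIGTRAP with extra information in the high bits.  */
  ptrace (PTRACE_CONT, pid, 0, 0);

  if (waitpid (pid, &status, 0) == pid
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    return status >> 16;	/* PTRACE_EVENT_FORK, _CLONE, ..., or 0.  */

  return 0;
}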
86
87 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
88 the use of the multi-threaded target. */
89 static struct target_ops *linux_ops;
90
91 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
92 Called by our to_xfer_partial. */
93 static LONGEST (*super_xfer_partial) (struct target_ops *,
94 enum target_object,
95 const char *, gdb_byte *,
96 const gdb_byte *,
97 ULONGEST, LONGEST);
98
99 /* The saved to_mourn_inferior method, inherited from inf-ptrace.c.
100 Called by our to_mourn_inferior. */
101 static void (*super_mourn_inferior) (void);
102
103 static int debug_linux_nat;
104 static void
105 show_debug_linux_nat (struct ui_file *file, int from_tty,
106 struct cmd_list_element *c, const char *value)
107 {
108 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
109 value);
110 }
111
112 static int linux_parent_pid;
113
114 struct simple_pid_list
115 {
116 int pid;
117 struct simple_pid_list *next;
118 };
119 struct simple_pid_list *stopped_pids;
120
121 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
122 can not be used, 1 if it can. */
123
124 static int linux_supports_tracefork_flag = -1;
125
126 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
127 PTRACE_O_TRACEVFORKDONE. */
128
129 static int linux_supports_tracevforkdone_flag = -1;
130
131 \f
132 /* Trivial list manipulation functions to keep track of a list of
133 new stopped processes. */
134 static void
135 add_to_pid_list (struct simple_pid_list **listp, int pid)
136 {
137 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
138 new_pid->pid = pid;
139 new_pid->next = *listp;
140 *listp = new_pid;
141 }
142
143 static int
144 pull_pid_from_list (struct simple_pid_list **listp, int pid)
145 {
146 struct simple_pid_list **p;
147
148 for (p = listp; *p != NULL; p = &(*p)->next)
149 if ((*p)->pid == pid)
150 {
151 struct simple_pid_list *next = (*p)->next;
152 xfree (*p);
153 *p = next;
154 return 1;
155 }
156 return 0;
157 }
158
159 void
160 linux_record_stopped_pid (int pid)
161 {
162 add_to_pid_list (&stopped_pids, pid);
163 }
164
165 \f
166 /* A helper function for linux_test_for_tracefork, called after fork (). */
167
168 static void
169 linux_tracefork_child (void)
170 {
171 int ret;
172
173 ptrace (PTRACE_TRACEME, 0, 0, 0);
174 kill (getpid (), SIGSTOP);
175 fork ();
176 _exit (0);
177 }
178
179 /* Wrapper function for waitpid which handles EINTR. */
180
181 static int
182 my_waitpid (int pid, int *status, int flags)
183 {
184 int ret;
185 do
186 {
187 ret = waitpid (pid, status, flags);
188 }
189 while (ret == -1 && errno == EINTR);
190
191 return ret;
192 }
193
194 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
195
196 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
197 we know that the feature is not available. This may change the tracing
198 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
199
200 However, if it succeeds, we don't know for sure that the feature is
201 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
202 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
203 fork tracing, and let it fork. If the process exits, we assume that we
204 can't use TRACEFORK; if we get the fork notification, and we can extract
205 the new child's PID, then we assume that we can. */
206
207 static void
208 linux_test_for_tracefork (int original_pid)
209 {
210 int child_pid, ret, status;
211 long second_pid;
212
213 linux_supports_tracefork_flag = 0;
214 linux_supports_tracevforkdone_flag = 0;
215
216 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
217 if (ret != 0)
218 return;
219
220 child_pid = fork ();
221 if (child_pid == -1)
222 perror_with_name (("fork"));
223
224 if (child_pid == 0)
225 linux_tracefork_child ();
226
227 ret = my_waitpid (child_pid, &status, 0);
228 if (ret == -1)
229 perror_with_name (("waitpid"));
230 else if (ret != child_pid)
231 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
232 if (! WIFSTOPPED (status))
233 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
234
235 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
236 if (ret != 0)
237 {
238 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
239 if (ret != 0)
240 {
241 warning (_("linux_test_for_tracefork: failed to kill child"));
242 return;
243 }
244
245 ret = my_waitpid (child_pid, &status, 0);
246 if (ret != child_pid)
247 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
248 else if (!WIFSIGNALED (status))
249 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
250 "killed child"), status);
251
252 return;
253 }
254
255 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
256 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
257 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
258 linux_supports_tracevforkdone_flag = (ret == 0);
259
260 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
261 if (ret != 0)
262 warning (_("linux_test_for_tracefork: failed to resume child"));
263
264 ret = my_waitpid (child_pid, &status, 0);
265
266 if (ret == child_pid && WIFSTOPPED (status)
267 && status >> 16 == PTRACE_EVENT_FORK)
268 {
269 second_pid = 0;
270 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
271 if (ret == 0 && second_pid != 0)
272 {
273 int second_status;
274
275 linux_supports_tracefork_flag = 1;
276 my_waitpid (second_pid, &second_status, 0);
277 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
278 if (ret != 0)
279 warning (_("linux_test_for_tracefork: failed to kill second child"));
280 }
281 }
282 else
283 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
284 "(%d, status 0x%x)"), ret, status);
285
286 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
287 if (ret != 0)
288 warning (_("linux_test_for_tracefork: failed to kill child"));
289 my_waitpid (child_pid, &status, 0);
290 }
291
292 /* Return non-zero iff we have tracefork functionality available.
293 This function also sets linux_supports_tracefork_flag. */
294
295 static int
296 linux_supports_tracefork (int pid)
297 {
298 if (linux_supports_tracefork_flag == -1)
299 linux_test_for_tracefork (pid);
300 return linux_supports_tracefork_flag;
301 }
302
303 static int
304 linux_supports_tracevforkdone (int pid)
305 {
306 if (linux_supports_tracefork_flag == -1)
307 linux_test_for_tracefork (pid);
308 return linux_supports_tracevforkdone_flag;
309 }
310
311 \f
312 void
313 linux_enable_event_reporting (ptid_t ptid)
314 {
315 int pid = ptid_get_lwp (ptid);
316 int options;
317
318 if (pid == 0)
319 pid = ptid_get_pid (ptid);
320
321 if (! linux_supports_tracefork (pid))
322 return;
323
324 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
325 | PTRACE_O_TRACECLONE;
326 if (linux_supports_tracevforkdone (pid))
327 options |= PTRACE_O_TRACEVFORKDONE;
328
329 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
330 read-only process state. */
331
332 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
333 }
334
335 void
336 child_post_attach (int pid)
337 {
338 linux_enable_event_reporting (pid_to_ptid (pid));
339 }
340
341 static void
342 linux_child_post_startup_inferior (ptid_t ptid)
343 {
344 linux_enable_event_reporting (ptid);
345 }
346
347 int
348 child_follow_fork (struct target_ops *ops, int follow_child)
349 {
350 ptid_t last_ptid;
351 struct target_waitstatus last_status;
352 int has_vforked;
353 int parent_pid, child_pid;
354
355 get_last_target_status (&last_ptid, &last_status);
356 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
357 parent_pid = ptid_get_lwp (last_ptid);
358 if (parent_pid == 0)
359 parent_pid = ptid_get_pid (last_ptid);
360 child_pid = last_status.value.related_pid;
361
362 if (! follow_child)
363 {
364 /* We're already attached to the parent, by default. */
365
366 /* Before detaching from the child, remove all breakpoints from
367 it. (This won't actually modify the breakpoint list, but will
368 physically remove the breakpoints from the child.) */
369 /* If we vforked this will remove the breakpoints from the parent
370 also, but they'll be reinserted below. */
371 detach_breakpoints (child_pid);
372
373 /* Detach new forked process? */
374 if (detach_fork)
375 {
376 if (debug_linux_nat)
377 {
378 target_terminal_ours ();
379 fprintf_filtered (gdb_stdlog,
380 "Detaching after fork from child process %d.\n",
381 child_pid);
382 }
383
384 ptrace (PTRACE_DETACH, child_pid, 0, 0);
385 }
386 else
387 {
388 struct fork_info *fp;
389 /* Retain child fork in ptrace (stopped) state. */
390 fp = find_fork_pid (child_pid);
391 if (!fp)
392 fp = add_fork (child_pid);
393 fork_save_infrun_state (fp, 0);
394 }
395
396 if (has_vforked)
397 {
398 gdb_assert (linux_supports_tracefork_flag >= 0);
399 if (linux_supports_tracevforkdone (0))
400 {
401 int status;
402
403 ptrace (PTRACE_CONT, parent_pid, 0, 0);
404 my_waitpid (parent_pid, &status, __WALL);
405 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
406 warning (_("Unexpected waitpid result %06x when waiting for "
407 "vfork-done"), status);
408 }
409 else
410 {
411 /* We can't insert breakpoints until the child has
412 finished with the shared memory region. We need to
413 wait until that happens. Ideal would be to just
414 call:
415 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
416 - waitpid (parent_pid, &status, __WALL);
417 However, most architectures can't handle a syscall
418 being traced on the way out if it wasn't traced on
419 the way in.
420
421 We might also think to loop, continuing the child
422 until it exits or gets a SIGTRAP. One problem is
423 that the child might call ptrace with PTRACE_TRACEME.
424
425 There's no simple and reliable way to figure out when
426 the vforked child will be done with its copy of the
427 shared memory. We could step it out of the syscall,
428 two instructions, let it go, and then single-step the
429 parent once. When we have hardware single-step, this
430 would work; with software single-step it could still
431 be made to work but we'd have to be able to insert
432 single-step breakpoints in the child, and we'd have
433 to insert -just- the single-step breakpoint in the
434 parent. Very awkward.
435
436 In the end, the best we can do is to make sure it
437 runs for a little while. Hopefully it will be out of
438 range of any breakpoints we reinsert. Usually this
439 is only the single-step breakpoint at vfork's return
440 point. */
441
442 usleep (10000);
443 }
444
445 /* Since we vforked, breakpoints were removed in the parent
446 too. Put them back. */
447 reattach_breakpoints (parent_pid);
448 }
449 }
450 else
451 {
452 char child_pid_spelling[40];
453
454 /* Needed to keep the breakpoint lists in sync. */
455 if (! has_vforked)
456 detach_breakpoints (child_pid);
457
458 /* Before detaching from the parent, remove all breakpoints from it. */
459 remove_breakpoints ();
460
461 if (debug_linux_nat)
462 {
463 target_terminal_ours ();
464 fprintf_filtered (gdb_stdlog,
465 "Attaching after fork to child process %d.\n",
466 child_pid);
467 }
468
469 /* If we're vforking, we may want to hold on to the parent until
470 the child exits or execs. At exec time we can remove the old
471 breakpoints from the parent and detach it; at exit time we
472 could do the same (or even, sneakily, resume debugging it - the
473 child's exec has failed, or something similar).
474
475 This doesn't clean up "properly", because we can't call
476 target_detach, but that's OK; if the current target is "child",
477 then it doesn't need any further cleanups, and lin_lwp will
478 generally not encounter vfork (vfork is defined to fork
479 in libpthread.so).
480
481 The holding part is very easy if we have VFORKDONE events;
482 but keeping track of both processes is beyond GDB at the
483 moment. So we don't expose the parent to the rest of GDB.
484 Instead we quietly hold onto it until such time as we can
485 safely resume it. */
486
487 if (has_vforked)
488 linux_parent_pid = parent_pid;
489 else if (!detach_fork)
490 {
491 struct fork_info *fp;
492 /* Retain parent fork in ptrace (stopped) state. */
493 fp = find_fork_pid (parent_pid);
494 if (!fp)
495 fp = add_fork (parent_pid);
496 fork_save_infrun_state (fp, 0);
497 }
498 else
499 {
500 target_detach (NULL, 0);
501 }
502
503 inferior_ptid = pid_to_ptid (child_pid);
504
505 /* Reinstall ourselves, since we might have been removed in
506 target_detach (which does other necessary cleanup). */
507
508 push_target (ops);
509
510 /* Reset breakpoints in the child as appropriate. */
511 follow_inferior_reset_breakpoints ();
512 }
513
514 return 0;
515 }
516
517 ptid_t
518 linux_handle_extended_wait (int pid, int status,
519 struct target_waitstatus *ourstatus)
520 {
521 int event = status >> 16;
522
523 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
524 || event == PTRACE_EVENT_CLONE)
525 {
526 unsigned long new_pid;
527 int ret;
528
529 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
530
531 /* If we haven't already seen the new PID stop, wait for it now. */
532 if (! pull_pid_from_list (&stopped_pids, new_pid))
533 {
534 /* The new child has a pending SIGSTOP. We can't affect it until it
535 hits the SIGSTOP, but we're already attached. */
536 ret = my_waitpid (new_pid, &status,
537 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
538 if (ret == -1)
539 perror_with_name (_("waiting for new child"));
540 else if (ret != new_pid)
541 internal_error (__FILE__, __LINE__,
542 _("wait returned unexpected PID %d"), ret);
543 else if (!WIFSTOPPED (status) || WSTOPSIG (status) != SIGSTOP)
544 internal_error (__FILE__, __LINE__,
545 _("wait returned unexpected status 0x%x"), status);
546 }
547
548 if (event == PTRACE_EVENT_FORK)
549 ourstatus->kind = TARGET_WAITKIND_FORKED;
550 else if (event == PTRACE_EVENT_VFORK)
551 ourstatus->kind = TARGET_WAITKIND_VFORKED;
552 else
553 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
554
555 ourstatus->value.related_pid = new_pid;
556 return inferior_ptid;
557 }
558
559 if (event == PTRACE_EVENT_EXEC)
560 {
561 ourstatus->kind = TARGET_WAITKIND_EXECD;
562 ourstatus->value.execd_pathname
563 = xstrdup (child_pid_to_exec_file (pid));
564
565 if (linux_parent_pid)
566 {
567 detach_breakpoints (linux_parent_pid);
568 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
569
570 linux_parent_pid = 0;
571 }
572
573 return inferior_ptid;
574 }
575
576 internal_error (__FILE__, __LINE__,
577 _("unknown ptrace event %d"), event);
578 }
579
580 \f
581 void
582 child_insert_fork_catchpoint (int pid)
583 {
584 if (! linux_supports_tracefork (pid))
585 error (_("Your system does not support fork catchpoints."));
586 }
587
588 void
589 child_insert_vfork_catchpoint (int pid)
590 {
591 if (!linux_supports_tracefork (pid))
592 error (_("Your system does not support vfork catchpoints."));
593 }
594
595 void
596 child_insert_exec_catchpoint (int pid)
597 {
598 if (!linux_supports_tracefork (pid))
599 error (_("Your system does not support exec catchpoints."));
600 }
601
602 void
603 kill_inferior (void)
604 {
605 int status;
606 int pid = PIDGET (inferior_ptid);
607 struct target_waitstatus last;
608 ptid_t last_ptid;
609 int ret;
610
611 if (pid == 0)
612 return;
613
614 /* First cut -- let's crudely do everything inline. */
615 if (forks_exist_p ())
616 {
617 linux_fork_killall ();
618 pop_target ();
619 generic_mourn_inferior ();
620 }
621 else
622 {
623 /* If we're stopped while forking and we haven't followed yet,
624 kill the other task. We need to do this first because the
625 parent will be sleeping if this is a vfork. */
626
627 get_last_target_status (&last_ptid, &last);
628
629 if (last.kind == TARGET_WAITKIND_FORKED
630 || last.kind == TARGET_WAITKIND_VFORKED)
631 {
632 ptrace (PT_KILL, last.value.related_pid, 0, 0);
633 wait (&status);
634 }
635
636 /* Kill the current process. */
637 ptrace (PT_KILL, pid, 0, 0);
638 ret = wait (&status);
639
640 /* We might get a SIGCHLD instead of an exit status. This is
641 aggravated by the first kill above - a child has just died. */
642
643 while (ret == pid && WIFSTOPPED (status))
644 {
645 ptrace (PT_KILL, pid, 0, 0);
646 ret = wait (&status);
647 }
648 target_mourn_inferior ();
649 }
650 }
651
652 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
653 are processes sharing the same VM space. A multi-threaded process
654 is basically a group of such processes. However, such a grouping
655 is almost entirely a user-space issue; the kernel doesn't enforce
656 such a grouping at all (this might change in the future). In
657 general, we'll rely on the threads library (i.e. the GNU/Linux
658 Threads library) to provide such a grouping.
659
660 It is perfectly possible to write a multi-threaded application
661 without the assistance of a threads library, by using the clone
662 system call directly. This module should be able to give some
663 rudimentary support for debugging such applications if developers
664 specify the CLONE_PTRACE flag in the clone system call, and are
665 using the Linux kernel 2.4 or above.
666
667 Note that there are some peculiarities in GNU/Linux that affect
668 this code:
669
670 - In general one should specify the __WCLONE flag to waitpid in
671 order to make it report events for any of the cloned processes
672 (and leave it out for the initial process). However, if a cloned
673 process has exited, the exit status is only reported if the
674 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
675 we cannot use it since GDB must work on older systems too.
676
677 - When a traced, cloned process exits and is waited for by the
678 debugger, the kernel reassigns it to the original parent and
679 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
680 library doesn't notice this, which leads to the "zombie problem":
681 When debugged, a multi-threaded process that spawns a lot of
682 threads will run out of processes, even if the threads exit,
683 because the "zombies" stay around. */
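/* Illustrative sketch only, not used by GDB; the function name is
   invented.  It shows the waitpid pattern that the __WCLONE caveat
   above forces on us, and which the attach and wait code below uses:
   poll without __WCLONE first, and if the kernel claims there is no
   such child (ECHILD), retry with __WCLONE, since events from cloned
   LWPs are only reported with that flag.  */

static pid_t
example_wait_for_lwp (pid_t lwp, int *status)
{
  pid_t ret = waitpid (lwp, status, 0);

  if (ret == -1 && errno == ECHILD)
    /* Not an ordinary child, so it must be a cloned LWP.  */
    ret = waitpid (lwp, status, __WCLONE);

  return ret;
}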
684
685 /* List of known LWPs. */
686 static struct lwp_info *lwp_list;
687
688 /* Number of LWPs in the list. */
689 static int num_lwps;
690
691 /* Non-zero if we're running in "threaded" mode. */
692 static int threaded;
693 \f
694
695 #define GET_LWP(ptid) ptid_get_lwp (ptid)
696 #define GET_PID(ptid) ptid_get_pid (ptid)
697 #define is_lwp(ptid) (GET_LWP (ptid) != 0)
698 #define BUILD_LWP(lwp, pid) ptid_build (pid, lwp, 0)
699
700 /* If the last reported event was a SIGTRAP, this variable is set to
701 the process id of the LWP/thread that got it. */
702 ptid_t trap_ptid;
703 \f
704
705 /* This module's target-specific operations. */
706 static struct target_ops linux_nat_ops;
707
708 /* Since we cannot wait (in linux_nat_wait) for the initial process and
709 any cloned processes with a single call to waitpid, we have to use
710 the WNOHANG flag and call waitpid in a loop. To optimize
711 things a bit we use `sigsuspend' to wake us up when a process has
712 something to report (it will send us a SIGCHLD if it has). To make
713 this work we have to juggle with the signal mask. We save the
714 original signal mask such that we can restore it before creating a
715 new process in order to avoid blocking certain signals in the
716 inferior. We then block SIGCHLD during the waitpid/sigsuspend
717 loop. */
718
719 /* Original signal mask. */
720 static sigset_t normal_mask;
721
722 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
723 _initialize_linux_nat. */
724 static sigset_t suspend_mask;
725
726 /* Signals to block to make the sigsuspend in linux_nat_wait work. */
727 static sigset_t blocked_mask;
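/* Illustrative sketch only, not used by GDB; the function name is
   invented.  It shows the shape of the waitpid/sigsuspend loop that
   the comment before `normal_mask' above describes, using the real
   suspend_mask declared above.  SIGCHLD is assumed to be blocked, so
   no child event can be lost between the non-blocking polls and the
   sigsuspend; suspend_mask unblocks SIGCHLD, so the next event wakes
   us up.  */

static pid_t
example_event_loop (int *status)
{
  pid_t pid;

  for (;;)
    {
      /* Poll cloned LWPs first, then ordinary children, without
	 blocking in either call.  */
      pid = waitpid (-1, status, __WCLONE | WNOHANG);
      if (pid <= 0)
	pid = waitpid (-1, status, WNOHANG);
      if (pid > 0)
	return pid;

      /* Nothing to report yet; sleep until a SIGCHLD arrives.  */
      sigsuspend (&suspend_mask);
    }
}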
728 \f
729
730 /* Prototypes for local functions. */
731 static int stop_wait_callback (struct lwp_info *lp, void *data);
732 static int linux_nat_thread_alive (ptid_t ptid);
733 \f
734 /* Convert wait status STATUS to a string. Used for printing debug
735 messages only. */
736
737 static char *
738 status_to_str (int status)
739 {
740 static char buf[64];
741
742 if (WIFSTOPPED (status))
743 snprintf (buf, sizeof (buf), "%s (stopped)",
744 strsignal (WSTOPSIG (status)));
745 else if (WIFSIGNALED (status))
746 snprintf (buf, sizeof (buf), "%s (terminated)",
747 strsignal (WTERMSIG (status)));
748 else
749 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
750
751 return buf;
752 }
753
754 /* Initialize the list of LWPs. Note that this module, contrary to
755 what GDB's generic threads layer does for its thread list,
756 re-initializes the LWP lists whenever we mourn or detach (which
757 doesn't involve mourning) the inferior. */
758
759 static void
760 init_lwp_list (void)
761 {
762 struct lwp_info *lp, *lpnext;
763
764 for (lp = lwp_list; lp; lp = lpnext)
765 {
766 lpnext = lp->next;
767 xfree (lp);
768 }
769
770 lwp_list = NULL;
771 num_lwps = 0;
772 threaded = 0;
773 }
774
775 /* Add the LWP specified by PID to the list. If this causes the
776 number of LWPs to become larger than one, go into "threaded" mode.
777 Return a pointer to the structure describing the new LWP. */
778
779 static struct lwp_info *
780 add_lwp (ptid_t ptid)
781 {
782 struct lwp_info *lp;
783
784 gdb_assert (is_lwp (ptid));
785
786 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
787
788 memset (lp, 0, sizeof (struct lwp_info));
789
790 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
791
792 lp->ptid = ptid;
793
794 lp->next = lwp_list;
795 lwp_list = lp;
796 if (++num_lwps > 1)
797 threaded = 1;
798
799 return lp;
800 }
801
802 /* Remove the LWP specified by PID from the list. */
803
804 static void
805 delete_lwp (ptid_t ptid)
806 {
807 struct lwp_info *lp, *lpprev;
808
809 lpprev = NULL;
810
811 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
812 if (ptid_equal (lp->ptid, ptid))
813 break;
814
815 if (!lp)
816 return;
817
818 /* We don't go back to "non-threaded" mode if the number of threads
819 becomes less than two. */
820 num_lwps--;
821
822 if (lpprev)
823 lpprev->next = lp->next;
824 else
825 lwp_list = lp->next;
826
827 xfree (lp);
828 }
829
830 /* Return a pointer to the structure describing the LWP corresponding
831 to PID. If no corresponding LWP could be found, return NULL. */
832
833 static struct lwp_info *
834 find_lwp_pid (ptid_t ptid)
835 {
836 struct lwp_info *lp;
837 int lwp;
838
839 if (is_lwp (ptid))
840 lwp = GET_LWP (ptid);
841 else
842 lwp = GET_PID (ptid);
843
844 for (lp = lwp_list; lp; lp = lp->next)
845 if (lwp == GET_LWP (lp->ptid))
846 return lp;
847
848 return NULL;
849 }
850
851 /* Call CALLBACK with its second argument set to DATA for every LWP in
852 the list. If CALLBACK returns 1 for a particular LWP, return a
853 pointer to the structure describing that LWP immediately.
854 Otherwise return NULL. */
855
856 struct lwp_info *
857 iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
858 {
859 struct lwp_info *lp, *lpnext;
860
861 for (lp = lwp_list; lp; lp = lpnext)
862 {
863 lpnext = lp->next;
864 if ((*callback) (lp, data))
865 return lp;
866 }
867
868 return NULL;
869 }
870
871 /* Attach to the LWP specified by PID. If VERBOSE is non-zero, print
872 a message telling the user that a new LWP has been added to the
873 process. */
874
875 void
876 lin_lwp_attach_lwp (ptid_t ptid, int verbose)
877 {
878 struct lwp_info *lp, *found_lp;
879
880 gdb_assert (is_lwp (ptid));
881
882 /* Make sure SIGCHLD is blocked. We don't want SIGCHLD events
883 to interrupt either the ptrace() or waitpid() calls below. */
884 if (!sigismember (&blocked_mask, SIGCHLD))
885 {
886 sigaddset (&blocked_mask, SIGCHLD);
887 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
888 }
889
890 if (verbose)
891 printf_filtered (_("[New %s]\n"), target_pid_to_str (ptid));
892
893 found_lp = lp = find_lwp_pid (ptid);
894 if (lp == NULL)
895 lp = add_lwp (ptid);
896
897 /* We assume that we're already attached to any LWP that has an id
898 equal to the overall process id, and to any LWP that is already
899 in our list of LWPs. If we're not seeing exit events from threads
900 and we've had PID wraparound since we last tried to stop all threads,
901 this assumption might be wrong; fortunately, this is very unlikely
902 to happen. */
903 if (GET_LWP (ptid) != GET_PID (ptid) && found_lp == NULL)
904 {
905 pid_t pid;
906 int status;
907
908 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
909 error (_("Can't attach %s: %s"), target_pid_to_str (ptid),
910 safe_strerror (errno));
911
912 if (debug_linux_nat)
913 fprintf_unfiltered (gdb_stdlog,
914 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
915 target_pid_to_str (ptid));
916
917 pid = my_waitpid (GET_LWP (ptid), &status, 0);
918 if (pid == -1 && errno == ECHILD)
919 {
920 /* Try again with __WCLONE to check cloned processes. */
921 pid = my_waitpid (GET_LWP (ptid), &status, __WCLONE);
922 lp->cloned = 1;
923 }
924
925 gdb_assert (pid == GET_LWP (ptid)
926 && WIFSTOPPED (status) && WSTOPSIG (status));
927
928 child_post_attach (pid);
929
930 lp->stopped = 1;
931
932 if (debug_linux_nat)
933 {
934 fprintf_unfiltered (gdb_stdlog,
935 "LLAL: waitpid %s received %s\n",
936 target_pid_to_str (ptid),
937 status_to_str (status));
938 }
939 }
940 else
941 {
942 /* We assume that the LWP representing the original process is
943 already stopped. Mark it as stopped in the data structure
944 that the linux ptrace layer uses to keep track of threads.
945 Note that this won't have already been done since the main
946 thread will have, we assume, been stopped by an attach from a
947 different layer. */
948 lp->stopped = 1;
949 }
950 }
951
952 static void
953 linux_nat_attach (char *args, int from_tty)
954 {
955 struct lwp_info *lp;
956 pid_t pid;
957 int status;
958
959 /* FIXME: We should probably accept a list of process id's, and
960 attach all of them. */
961 linux_ops->to_attach (args, from_tty);
962
963 /* Add the initial process as the first LWP to the list. */
964 lp = add_lwp (BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid)));
965
966 /* Make sure the initial process is stopped. The user-level threads
967 layer might want to poke around in the inferior, and that won't
968 work if things haven't stabilized yet. */
969 pid = my_waitpid (GET_PID (inferior_ptid), &status, 0);
970 if (pid == -1 && errno == ECHILD)
971 {
972 warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid));
973
974 /* Try again with __WCLONE to check cloned processes. */
975 pid = my_waitpid (GET_PID (inferior_ptid), &status, __WCLONE);
976 lp->cloned = 1;
977 }
978
979 gdb_assert (pid == GET_PID (inferior_ptid)
980 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
981
982 lp->stopped = 1;
983
984 /* Fake the SIGSTOP that core GDB expects. */
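  /* W_STOPCODE builds a raw wait status that WIFSTOPPED and WSTOPSIG
     will decode as a stop by SIGSTOP, so the generic layer sees the
     stop it is waiting for even though we already consumed the real
     event with the waitpid call above.  */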
985 lp->status = W_STOPCODE (SIGSTOP);
986 lp->resumed = 1;
987 if (debug_linux_nat)
988 {
989 fprintf_unfiltered (gdb_stdlog,
990 "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid);
991 }
992 }
993
994 static int
995 detach_callback (struct lwp_info *lp, void *data)
996 {
997 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
998
999 if (debug_linux_nat && lp->status)
1000 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1001 strsignal (WSTOPSIG (lp->status)),
1002 target_pid_to_str (lp->ptid));
1003
1004 while (lp->signalled && lp->stopped)
1005 {
1006 errno = 0;
1007 if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0,
1008 WSTOPSIG (lp->status)) < 0)
1009 error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid),
1010 safe_strerror (errno));
1011
1012 if (debug_linux_nat)
1013 fprintf_unfiltered (gdb_stdlog,
1014 "DC: PTRACE_CONT (%s, 0, %s) (OK)\n",
1015 target_pid_to_str (lp->ptid),
1016 status_to_str (lp->status));
1017
1018 lp->stopped = 0;
1019 lp->signalled = 0;
1020 lp->status = 0;
1021 /* FIXME drow/2003-08-26: There was a call to stop_wait_callback
1022 here. But since lp->signalled was cleared above,
1023 stop_wait_callback didn't do anything; the process was left
1024 running. Shouldn't we be waiting for it to stop?
1025 I've removed the call, since stop_wait_callback now does do
1026 something when called with lp->signalled == 0. */
1027
1028 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1029 }
1030
1031 /* We don't actually detach from the LWP that has an id equal to the
1032 overall process id just yet. */
1033 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1034 {
1035 errno = 0;
1036 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1037 WSTOPSIG (lp->status)) < 0)
1038 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1039 safe_strerror (errno));
1040
1041 if (debug_linux_nat)
1042 fprintf_unfiltered (gdb_stdlog,
1043 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1044 target_pid_to_str (lp->ptid),
1045 strsignal (WSTOPSIG (lp->status)));
1046
1047 delete_lwp (lp->ptid);
1048 }
1049
1050 return 0;
1051 }
1052
1053 static void
1054 linux_nat_detach (char *args, int from_tty)
1055 {
1056 iterate_over_lwps (detach_callback, NULL);
1057
1058 /* Only the initial process should be left right now. */
1059 gdb_assert (num_lwps == 1);
1060
1061 trap_ptid = null_ptid;
1062
1063 /* Destroy LWP info; it's no longer valid. */
1064 init_lwp_list ();
1065
1066 /* Restore the original signal mask. */
1067 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1068 sigemptyset (&blocked_mask);
1069
1070 inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid));
1071 linux_ops->to_detach (args, from_tty);
1072 }
1073
1074 /* Resume LP. */
1075
1076 static int
1077 resume_callback (struct lwp_info *lp, void *data)
1078 {
1079 if (lp->stopped && lp->status == 0)
1080 {
1081 struct thread_info *tp;
1082
1083 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1084 0, TARGET_SIGNAL_0);
1085 if (debug_linux_nat)
1086 fprintf_unfiltered (gdb_stdlog,
1087 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1088 target_pid_to_str (lp->ptid));
1089 lp->stopped = 0;
1090 lp->step = 0;
1091 }
1092
1093 return 0;
1094 }
1095
1096 static int
1097 resume_clear_callback (struct lwp_info *lp, void *data)
1098 {
1099 lp->resumed = 0;
1100 return 0;
1101 }
1102
1103 static int
1104 resume_set_callback (struct lwp_info *lp, void *data)
1105 {
1106 lp->resumed = 1;
1107 return 0;
1108 }
1109
1110 static void
1111 linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1112 {
1113 struct lwp_info *lp;
1114 int resume_all;
1115
1116 if (debug_linux_nat)
1117 fprintf_unfiltered (gdb_stdlog,
1118 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1119 step ? "step" : "resume",
1120 target_pid_to_str (ptid),
1121 signo ? strsignal (signo) : "0",
1122 target_pid_to_str (inferior_ptid));
1123
1124 /* A specific PTID means `step only this process id'. */
1125 resume_all = (PIDGET (ptid) == -1);
1126
1127 if (resume_all)
1128 iterate_over_lwps (resume_set_callback, NULL);
1129 else
1130 iterate_over_lwps (resume_clear_callback, NULL);
1131
1132 /* If PID is -1, it's the current inferior that should be
1133 handled specially. */
1134 if (PIDGET (ptid) == -1)
1135 ptid = inferior_ptid;
1136
1137 lp = find_lwp_pid (ptid);
1138 if (lp)
1139 {
1140 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1141
1142 /* Remember if we're stepping. */
1143 lp->step = step;
1144
1145 /* Mark this LWP as resumed. */
1146 lp->resumed = 1;
1147
1148 /* If we have a pending wait status for this thread, there is no
1149 point in resuming the process. But first make sure that
1150 linux_nat_wait won't preemptively handle the event - we
1151 should never take this short-circuit if we are going to
1152 leave LP running, since we have skipped resuming all the
1153 other threads. This bit of code needs to be synchronized
1154 with linux_nat_wait. */
1155
1156 if (lp->status && WIFSTOPPED (lp->status))
1157 {
1158 int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1159
1160 if (signal_stop_state (saved_signo) == 0
1161 && signal_print_state (saved_signo) == 0
1162 && signal_pass_state (saved_signo) == 1)
1163 {
1164 if (debug_linux_nat)
1165 fprintf_unfiltered (gdb_stdlog,
1166 "LLR: Not short circuiting for ignored "
1167 "status 0x%x\n", lp->status);
1168
1169 /* FIXME: What should we do if we are supposed to continue
1170 this thread with a signal? */
1171 gdb_assert (signo == TARGET_SIGNAL_0);
1172 signo = saved_signo;
1173 lp->status = 0;
1174 }
1175 }
1176
1177 if (lp->status)
1178 {
1179 /* FIXME: What should we do if we are supposed to continue
1180 this thread with a signal? */
1181 gdb_assert (signo == TARGET_SIGNAL_0);
1182
1183 if (debug_linux_nat)
1184 fprintf_unfiltered (gdb_stdlog,
1185 "LLR: Short circuiting for status 0x%x\n",
1186 lp->status);
1187
1188 return;
1189 }
1190
1191 /* Mark LWP as not stopped to prevent it from being continued by
1192 resume_callback. */
1193 lp->stopped = 0;
1194 }
1195
1196 if (resume_all)
1197 iterate_over_lwps (resume_callback, NULL);
1198
1199 linux_ops->to_resume (ptid, step, signo);
1200 if (debug_linux_nat)
1201 fprintf_unfiltered (gdb_stdlog,
1202 "LLR: %s %s, %s (resume event thread)\n",
1203 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1204 target_pid_to_str (ptid),
1205 signo ? strsignal (signo) : "0");
1206 }
1207
1208 /* Issue kill to specified lwp. */
1209
1210 static int tkill_failed;
1211
1212 static int
1213 kill_lwp (int lwpid, int signo)
1214 {
1215 errno = 0;
1216
1217 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1218 fails, then we are not using nptl threads and we should be using kill. */
1219
1220 #ifdef HAVE_TKILL_SYSCALL
1221 if (!tkill_failed)
1222 {
1223 int ret = syscall (__NR_tkill, lwpid, signo);
1224 if (errno != ENOSYS)
1225 return ret;
1226 errno = 0;
1227 tkill_failed = 1;
1228 }
1229 #endif
1230
1231 return kill (lwpid, signo);
1232 }
1233
1234 /* Handle a GNU/Linux extended wait response. Most of the work we
1235 just pass off to linux_handle_extended_wait, but if it reports a
1236 clone event we need to add the new LWP to our list (and not report
1237 the trap to higher layers). This function returns non-zero if
1238 the event should be ignored and we should wait again. */
1239
1240 static int
1241 linux_nat_handle_extended (struct lwp_info *lp, int status)
1242 {
1243 linux_handle_extended_wait (GET_LWP (lp->ptid), status,
1244 &lp->waitstatus);
1245
1246 /* TARGET_WAITKIND_SPURIOUS is used to indicate clone events. */
1247 if (lp->waitstatus.kind == TARGET_WAITKIND_SPURIOUS)
1248 {
1249 struct lwp_info *new_lp;
1250 new_lp = add_lwp (BUILD_LWP (lp->waitstatus.value.related_pid,
1251 GET_PID (inferior_ptid)));
1252 new_lp->cloned = 1;
1253 new_lp->stopped = 1;
1254
1255 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1256
1257 if (debug_linux_nat)
1258 fprintf_unfiltered (gdb_stdlog,
1259 "LLHE: Got clone event from LWP %ld, resuming\n",
1260 GET_LWP (lp->ptid));
1261 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1262
1263 return 1;
1264 }
1265
1266 return 0;
1267 }
1268
1269 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1270 exited. */
1271
1272 static int
1273 wait_lwp (struct lwp_info *lp)
1274 {
1275 pid_t pid;
1276 int status;
1277 int thread_dead = 0;
1278
1279 gdb_assert (!lp->stopped);
1280 gdb_assert (lp->status == 0);
1281
1282 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
1283 if (pid == -1 && errno == ECHILD)
1284 {
1285 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
1286 if (pid == -1 && errno == ECHILD)
1287 {
1288 /* The thread has previously exited. We need to delete it
1289 now because, for some vendor 2.4 kernels with NPTL
1290 support backported, there won't be an exit event unless
1291 it is the main thread. 2.6 kernels will report an exit
1292 event for each thread that exits, as expected. */
1293 thread_dead = 1;
1294 if (debug_linux_nat)
1295 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1296 target_pid_to_str (lp->ptid));
1297 }
1298 }
1299
1300 if (!thread_dead)
1301 {
1302 gdb_assert (pid == GET_LWP (lp->ptid));
1303
1304 if (debug_linux_nat)
1305 {
1306 fprintf_unfiltered (gdb_stdlog,
1307 "WL: waitpid %s received %s\n",
1308 target_pid_to_str (lp->ptid),
1309 status_to_str (status));
1310 }
1311 }
1312
1313 /* Check if the thread has exited. */
1314 if (WIFEXITED (status) || WIFSIGNALED (status))
1315 {
1316 thread_dead = 1;
1317 if (debug_linux_nat)
1318 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1319 target_pid_to_str (lp->ptid));
1320 }
1321
1322 if (thread_dead)
1323 {
1324 if (in_thread_list (lp->ptid))
1325 {
1326 /* Core GDB cannot deal with us deleting the current thread. */
1327 if (!ptid_equal (lp->ptid, inferior_ptid))
1328 delete_thread (lp->ptid);
1329 printf_unfiltered (_("[%s exited]\n"),
1330 target_pid_to_str (lp->ptid));
1331 }
1332
1333 delete_lwp (lp->ptid);
1334 return 0;
1335 }
1336
1337 gdb_assert (WIFSTOPPED (status));
1338
1339 /* Handle GNU/Linux's extended waitstatus for trace events. */
1340 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1341 {
1342 if (debug_linux_nat)
1343 fprintf_unfiltered (gdb_stdlog,
1344 "WL: Handling extended status 0x%06x\n",
1345 status);
1346 if (linux_nat_handle_extended (lp, status))
1347 return wait_lwp (lp);
1348 }
1349
1350 return status;
1351 }
1352
1353 /* Send a SIGSTOP to LP. */
1354
1355 static int
1356 stop_callback (struct lwp_info *lp, void *data)
1357 {
1358 if (!lp->stopped && !lp->signalled)
1359 {
1360 int ret;
1361
1362 if (debug_linux_nat)
1363 {
1364 fprintf_unfiltered (gdb_stdlog,
1365 "SC: kill %s **<SIGSTOP>**\n",
1366 target_pid_to_str (lp->ptid));
1367 }
1368 errno = 0;
1369 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1370 if (debug_linux_nat)
1371 {
1372 fprintf_unfiltered (gdb_stdlog,
1373 "SC: lwp kill %d %s\n",
1374 ret,
1375 errno ? safe_strerror (errno) : "ERRNO-OK");
1376 }
1377
1378 lp->signalled = 1;
1379 gdb_assert (lp->status == 0);
1380 }
1381
1382 return 0;
1383 }
1384
1385 /* Wait until LP is stopped. If DATA is non-null it is interpreted as
1386 a pointer to a set of signals to be flushed immediately. */
1387
1388 static int
1389 stop_wait_callback (struct lwp_info *lp, void *data)
1390 {
1391 sigset_t *flush_mask = data;
1392
1393 if (!lp->stopped)
1394 {
1395 int status;
1396
1397 status = wait_lwp (lp);
1398 if (status == 0)
1399 return 0;
1400
1401 /* Ignore any signals in FLUSH_MASK. */
1402 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1403 {
1404 if (!lp->signalled)
1405 {
1406 lp->stopped = 1;
1407 return 0;
1408 }
1409
1410 errno = 0;
1411 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1412 if (debug_linux_nat)
1413 fprintf_unfiltered (gdb_stdlog,
1414 "PTRACE_CONT %s, 0, 0 (%s)\n",
1415 target_pid_to_str (lp->ptid),
1416 errno ? safe_strerror (errno) : "OK");
1417
1418 return stop_wait_callback (lp, flush_mask);
1419 }
1420
1421 if (WSTOPSIG (status) != SIGSTOP)
1422 {
1423 if (WSTOPSIG (status) == SIGTRAP)
1424 {
1425 /* If a LWP other than the LWP that we're reporting an
1426 event for has hit a GDB breakpoint (as opposed to
1427 some random trap signal), then just arrange for it to
1428 hit it again later. We don't keep the SIGTRAP status
1429 and don't forward the SIGTRAP signal to the LWP. We
1430 will handle the current event, eventually we will
1431 resume all LWPs, and this one will get its breakpoint
1432 trap again.
1433
1434 If we do not do this, then we run the risk that the
1435 user will delete or disable the breakpoint, but the
1436 thread will have already tripped on it. */
1437
1438 /* Now resume this LWP and get the SIGSTOP event. */
1439 errno = 0;
1440 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1441 if (debug_linux_nat)
1442 {
1443 fprintf_unfiltered (gdb_stdlog,
1444 "PTRACE_CONT %s, 0, 0 (%s)\n",
1445 target_pid_to_str (lp->ptid),
1446 errno ? safe_strerror (errno) : "OK");
1447
1448 fprintf_unfiltered (gdb_stdlog,
1449 "SWC: Candidate SIGTRAP event in %s\n",
1450 target_pid_to_str (lp->ptid));
1451 }
1452 /* Hold the SIGTRAP for handling by linux_nat_wait. */
1453 stop_wait_callback (lp, data);
1454 /* If there's another event, throw it back into the queue. */
1455 if (lp->status)
1456 {
1457 if (debug_linux_nat)
1458 {
1459 fprintf_unfiltered (gdb_stdlog,
1460 "SWC: kill %s, %s\n",
1461 target_pid_to_str (lp->ptid),
1462 status_to_str ((int) status));
1463 }
1464 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
1465 }
1466 /* Save the sigtrap event. */
1467 lp->status = status;
1468 return 0;
1469 }
1470 else
1471 {
1472 /* The thread was stopped with a signal other than
1473 SIGSTOP, and didn't accidentally trip a breakpoint. */
1474
1475 if (debug_linux_nat)
1476 {
1477 fprintf_unfiltered (gdb_stdlog,
1478 "SWC: Pending event %s in %s\n",
1479 status_to_str ((int) status),
1480 target_pid_to_str (lp->ptid));
1481 }
1482 /* Now resume this LWP and get the SIGSTOP event. */
1483 errno = 0;
1484 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1485 if (debug_linux_nat)
1486 fprintf_unfiltered (gdb_stdlog,
1487 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1488 target_pid_to_str (lp->ptid),
1489 errno ? safe_strerror (errno) : "OK");
1490
1491 /* Hold this event/waitstatus while we check to see if
1492 there are any more (we still want to get that SIGSTOP). */
1493 stop_wait_callback (lp, data);
1494 /* If the lp->status field is still empty, use it to hold
1495 this event. If not, then this event must be returned
1496 to the event queue of the LWP. */
1497 if (lp->status == 0)
1498 lp->status = status;
1499 else
1500 {
1501 if (debug_linux_nat)
1502 {
1503 fprintf_unfiltered (gdb_stdlog,
1504 "SWC: kill %s, %s\n",
1505 target_pid_to_str (lp->ptid),
1506 status_to_str ((int) status));
1507 }
1508 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1509 }
1510 return 0;
1511 }
1512 }
1513 else
1514 {
1515 /* We caught the SIGSTOP that we intended to catch, so
1516 there's no SIGSTOP pending. */
1517 lp->stopped = 1;
1518 lp->signalled = 0;
1519 }
1520 }
1521
1522 return 0;
1523 }
1524
1525 /* Check whether PID has any pending signals in FLUSH_MASK. If so set
1526 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1527
1528 static int
1529 linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1530 {
1531 sigset_t blocked, ignored;
1532 int i;
1533
1534 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
1535
1536 if (!flush_mask)
1537 return 0;
1538
1539 for (i = 1; i < NSIG; i++)
1540 if (sigismember (pending, i))
1541 if (!sigismember (flush_mask, i)
1542 || sigismember (&blocked, i)
1543 || sigismember (&ignored, i))
1544 sigdelset (pending, i);
1545
1546 if (sigisemptyset (pending))
1547 return 0;
1548
1549 return 1;
1550 }
1551
1552 /* DATA is interpreted as a mask of signals to flush. If LP has
1553 signals pending, and they are all in the flush mask, then arrange
1554 to flush them. LP should be stopped, as should all other threads
1555 it might share a signal queue with. */
1556
1557 static int
1558 flush_callback (struct lwp_info *lp, void *data)
1559 {
1560 sigset_t *flush_mask = data;
1561 sigset_t pending, intersection, blocked, ignored;
1562 int pid, status;
1563
1564 /* Normally, when an LWP exits, it is removed from the LWP list. The
1565 last LWP isn't removed till later, however. So if there is only
1566 one LWP on the list, make sure it's alive. */
1567 if (lwp_list == lp && lp->next == NULL)
1568 if (!linux_nat_thread_alive (lp->ptid))
1569 return 0;
1570
1571 /* Just because the LWP is stopped doesn't mean that new signals
1572 can't arrive from outside, so this function must be careful of
1573 race conditions. However, because all threads are stopped, we
1574 can assume that the pending mask will not shrink unless we resume
1575 the LWP, and that it will then get another signal. We can't
1576 control which one, however. */
1577
1578 if (lp->status)
1579 {
1580 if (debug_linux_nat)
1581 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
1582 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
1583 lp->status = 0;
1584 }
1585
1586 while (linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
1587 {
1588 int ret;
1589
1590 errno = 0;
1591 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1592 if (debug_linux_nat)
1593 fprintf_unfiltered (gdb_stderr,
1594 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
1595
1596 lp->stopped = 0;
1597 stop_wait_callback (lp, flush_mask);
1598 if (debug_linux_nat)
1599 fprintf_unfiltered (gdb_stderr,
1600 "FC: Wait finished; saved status is %d\n",
1601 lp->status);
1602 }
1603
1604 return 0;
1605 }
1606
1607 /* Return non-zero if LP has a wait status pending. */
1608
1609 static int
1610 status_callback (struct lwp_info *lp, void *data)
1611 {
1612 /* Only report a pending wait status if we pretend that this has
1613 indeed been resumed. */
1614 return (lp->status != 0 && lp->resumed);
1615 }
1616
1617 /* Return non-zero if LP isn't stopped. */
1618
1619 static int
1620 running_callback (struct lwp_info *lp, void *data)
1621 {
1622 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
1623 }
1624
1625 /* Count the LWP's that have had events. */
1626
1627 static int
1628 count_events_callback (struct lwp_info *lp, void *data)
1629 {
1630 int *count = data;
1631
1632 gdb_assert (count != NULL);
1633
1634 /* Count only LWPs that have a SIGTRAP event pending. */
1635 if (lp->status != 0
1636 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1637 (*count)++;
1638
1639 return 0;
1640 }
1641
1642 /* Select the LWP (if any) that is currently being single-stepped. */
1643
1644 static int
1645 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
1646 {
1647 if (lp->step && lp->status != 0)
1648 return 1;
1649 else
1650 return 0;
1651 }
1652
1653 /* Select the Nth LWP that has had a SIGTRAP event. */
1654
1655 static int
1656 select_event_lwp_callback (struct lwp_info *lp, void *data)
1657 {
1658 int *selector = data;
1659
1660 gdb_assert (selector != NULL);
1661
1662 /* Select only LWPs that have a SIGTRAP event pending. */
1663 if (lp->status != 0
1664 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1665 if ((*selector)-- == 0)
1666 return 1;
1667
1668 return 0;
1669 }
1670
1671 static int
1672 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
1673 {
1674 struct lwp_info *event_lp = data;
1675
1676 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1677 if (lp == event_lp)
1678 return 0;
1679
1680 /* If a LWP other than the LWP that we're reporting an event for has
1681 hit a GDB breakpoint (as opposed to some random trap signal),
1682 then just arrange for it to hit it again later. We don't keep
1683 the SIGTRAP status and don't forward the SIGTRAP signal to the
1684 LWP. We will handle the current event, eventually we will resume
1685 all LWPs, and this one will get its breakpoint trap again.
1686
1687 If we do not do this, then we run the risk that the user will
1688 delete or disable the breakpoint, but the LWP will have already
1689 tripped on it. */
1690
1691 if (lp->status != 0
1692 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
1693 && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
1694 DECR_PC_AFTER_BREAK))
1695 {
1696 if (debug_linux_nat)
1697 fprintf_unfiltered (gdb_stdlog,
1698 "CBC: Push back breakpoint for %s\n",
1699 target_pid_to_str (lp->ptid));
1700
1701 /* Back up the PC if necessary. */
1702 if (DECR_PC_AFTER_BREAK)
1703 write_pc_pid (read_pc_pid (lp->ptid) - DECR_PC_AFTER_BREAK, lp->ptid);
1704
1705 /* Throw away the SIGTRAP. */
1706 lp->status = 0;
1707 }
1708
1709 return 0;
1710 }
1711
1712 /* Select one LWP out of those that have events pending. */
1713
1714 static void
1715 select_event_lwp (struct lwp_info **orig_lp, int *status)
1716 {
1717 int num_events = 0;
1718 int random_selector;
1719 struct lwp_info *event_lp;
1720
1721 /* Record the wait status for the original LWP. */
1722 (*orig_lp)->status = *status;
1723
1724 /* Give preference to any LWP that is being single-stepped. */
1725 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
1726 if (event_lp != NULL)
1727 {
1728 if (debug_linux_nat)
1729 fprintf_unfiltered (gdb_stdlog,
1730 "SEL: Select single-step %s\n",
1731 target_pid_to_str (event_lp->ptid));
1732 }
1733 else
1734 {
1735 /* No single-stepping LWP. Select one at random, out of those
1736 which have had SIGTRAP events. */
1737
1738 /* First see how many SIGTRAP events we have. */
1739 iterate_over_lwps (count_events_callback, &num_events);
1740
1741 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
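      /* rand () / (RAND_MAX + 1.0) is uniform over [0, 1), so scaling
	 by num_events and truncating yields an integer that is
	 (approximately) uniform over 0 .. num_events - 1.  */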
1742 random_selector = (int)
1743 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1744
1745 if (debug_linux_nat && num_events > 1)
1746 fprintf_unfiltered (gdb_stdlog,
1747 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1748 num_events, random_selector);
1749
1750 event_lp = iterate_over_lwps (select_event_lwp_callback,
1751 &random_selector);
1752 }
1753
1754 if (event_lp != NULL)
1755 {
1756 /* Switch the event LWP. */
1757 *orig_lp = event_lp;
1758 *status = event_lp->status;
1759 }
1760
1761 /* Flush the wait status for the event LWP. */
1762 (*orig_lp)->status = 0;
1763 }
1764
1765 /* Return non-zero if LP has been resumed. */
1766
1767 static int
1768 resumed_callback (struct lwp_info *lp, void *data)
1769 {
1770 return lp->resumed;
1771 }
1772
1773 /* Local mourn_inferior -- we need to override mourn_inferior
1774 so that we can do something clever if one of several forks
1775 has exited. */
1776
1777 static void
1778 child_mourn_inferior (void)
1779 {
1780 int status;
1781
1782 if (! forks_exist_p ())
1783 {
1784 /* Normal case, no other forks available. */
1785 super_mourn_inferior ();
1786 return;
1787 }
1788 else
1789 {
1790 /* Multi-fork case. The current inferior_ptid has exited, but
1791 there are other viable forks to debug. Delete the exiting
1792 one and context-switch to the first available. */
1793 linux_fork_mourn_inferior ();
1794 }
1795 }
1796
1797 /* We need to override child_wait to support attaching to cloned
1798 processes, since a normal wait (as done by the default version)
1799 ignores those processes. */
1800
1801 /* Wait for child PTID to do something. Return id of the child,
1802 minus_one_ptid in case of error; store status into *OURSTATUS. */
1803
1804 ptid_t
1805 child_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
1806 {
1807 int save_errno;
1808 int status;
1809 pid_t pid;
1810
1811 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1812
1813 do
1814 {
1815 set_sigint_trap (); /* Causes SIGINT to be passed on to the
1816 attached process. */
1817 set_sigio_trap ();
1818
1819 pid = my_waitpid (GET_PID (ptid), &status, 0);
1820 if (pid == -1 && errno == ECHILD)
1821 /* Try again with __WCLONE to check cloned processes. */
1822 pid = my_waitpid (GET_PID (ptid), &status, __WCLONE);
1823
1824 if (debug_linux_nat)
1825 {
1826 fprintf_unfiltered (gdb_stdlog,
1827 "CW: waitpid %ld received %s\n",
1828 (long) pid, status_to_str (status));
1829 }
1830
1831 save_errno = errno;
1832
1833 /* Make sure we don't report an event for the exit of the
1834 original program, if we've detached from it. */
1835 if (pid != -1 && !WIFSTOPPED (status) && pid != GET_PID (inferior_ptid))
1836 {
1837 pid = -1;
1838 save_errno = EINTR;
1839 }
1840
1841 /* Check for stop events reported by a process we didn't already
1842 know about - in this case, anything other than inferior_ptid.
1843
1844 If we're expecting to receive stopped processes after fork,
1845 vfork, and clone events, then we'll just add the new one to
1846 our list and go back to waiting for the event to be reported
1847 - the stopped process might be returned from waitpid before
1848 or after the event is. If we want to handle debugging of
1849 CLONE_PTRACE processes we need to do more here, i.e. switch
1850 to multi-threaded mode. */
1851 if (pid != -1 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP
1852 && pid != GET_PID (inferior_ptid))
1853 {
1854 linux_record_stopped_pid (pid);
1855 pid = -1;
1856 save_errno = EINTR;
1857 }
1858
1859 /* Handle GNU/Linux's extended waitstatus for trace events. */
1860 if (pid != -1 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
1861 && status >> 16 != 0)
1862 {
1863 linux_handle_extended_wait (pid, status, ourstatus);
1864
1865 /* If we see a clone event, detach the child, and don't
1866 report the event. It would be nice to offer some way to
1867 switch into a non-thread-db based threaded mode at this
1868 point. */
1869 if (ourstatus->kind == TARGET_WAITKIND_SPURIOUS)
1870 {
1871 ptrace (PTRACE_DETACH, ourstatus->value.related_pid, 0, 0);
1872 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1873 ptrace (PTRACE_CONT, pid, 0, 0);
1874 pid = -1;
1875 save_errno = EINTR;
1876 }
1877 }
1878
1879 clear_sigio_trap ();
1880 clear_sigint_trap ();
1881 }
1882 while (pid == -1 && save_errno == EINTR);
1883
1884 if (pid == -1)
1885 {
1886 warning (_("Child process unexpectedly missing: %s"),
1887 safe_strerror (errno));
1888
1889 /* Claim it exited with unknown signal. */
1890 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
1891 ourstatus->value.sig = TARGET_SIGNAL_UNKNOWN;
1892 return minus_one_ptid;
1893 }
1894
1895 if (ourstatus->kind == TARGET_WAITKIND_IGNORE)
1896 store_waitstatus (ourstatus, status);
1897
1898 return pid_to_ptid (pid);
1899 }
1900
1901 /* Stop an active thread, verify it still exists, then resume it. */
1902
1903 static int
1904 stop_and_resume_callback (struct lwp_info *lp, void *data)
1905 {
1906 struct lwp_info *ptr;
1907
1908 if (!lp->stopped && !lp->signalled)
1909 {
1910 stop_callback (lp, NULL);
1911 stop_wait_callback (lp, NULL);
1912 /* Resume if the lwp still exists. */
1913 for (ptr = lwp_list; ptr; ptr = ptr->next)
1914 if (lp == ptr)
1915 {
1916 resume_callback (lp, NULL);
1917 resume_set_callback (lp, NULL);
1918 }
1919 }
1920 return 0;
1921 }
1922
1923 static ptid_t
1924 linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
1925 {
1926 struct lwp_info *lp = NULL;
1927 int options = 0;
1928 int status = 0;
1929 pid_t pid = PIDGET (ptid);
1930 sigset_t flush_mask;
1931
1932 sigemptyset (&flush_mask);
1933
1934 /* Make sure SIGCHLD is blocked. */
1935 if (!sigismember (&blocked_mask, SIGCHLD))
1936 {
1937 sigaddset (&blocked_mask, SIGCHLD);
1938 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
1939 }
1940
1941 retry:
1942
1943 /* Make sure there is at least one LWP that has been resumed, at
1944 least if there are any LWPs at all. */
1945 gdb_assert (num_lwps == 0 || iterate_over_lwps (resumed_callback, NULL));
1946
1947 /* First check if there is a LWP with a wait status pending. */
1948 if (pid == -1)
1949 {
1950 /* Any LWP that's been resumed will do. */
1951 lp = iterate_over_lwps (status_callback, NULL);
1952 if (lp)
1953 {
1954 status = lp->status;
1955 lp->status = 0;
1956
1957 if (debug_linux_nat && status)
1958 fprintf_unfiltered (gdb_stdlog,
1959 "LLW: Using pending wait status %s for %s.\n",
1960 status_to_str (status),
1961 target_pid_to_str (lp->ptid));
1962 }
1963
1964 /* But if we don't find one, we'll have to wait, and check both
1965 cloned and uncloned processes. We start with the cloned
1966 processes. */
1967 options = __WCLONE | WNOHANG;
1968 }
1969 else if (is_lwp (ptid))
1970 {
1971 if (debug_linux_nat)
1972 fprintf_unfiltered (gdb_stdlog,
1973 "LLW: Waiting for specific LWP %s.\n",
1974 target_pid_to_str (ptid));
1975
1976 /* We have a specific LWP to check. */
1977 lp = find_lwp_pid (ptid);
1978 gdb_assert (lp);
1979 status = lp->status;
1980 lp->status = 0;
1981
1982 if (debug_linux_nat && status)
1983 fprintf_unfiltered (gdb_stdlog,
1984 "LLW: Using pending wait status %s for %s.\n",
1985 status_to_str (status),
1986 target_pid_to_str (lp->ptid));
1987
1988 /* If we have to wait, take into account whether PID is a cloned
1989 process or not. And we have to convert it to something that
1990 the layer beneath us can understand. */
1991 options = lp->cloned ? __WCLONE : 0;
1992 pid = GET_LWP (ptid);
1993 }
1994
1995 if (status && lp->signalled)
1996 {
1997 /* A pending SIGSTOP may interfere with the normal stream of
1998 events. In a typical case where interference is a problem,
1999 we have a SIGSTOP signal pending for LWP A while
2000 single-stepping it, encounter an event in LWP B, and take the
2001 pending SIGSTOP while trying to stop LWP A. After processing
2002 the event in LWP B, LWP A is continued, and we'll never see
2003 the SIGTRAP associated with the last time we were
2004 single-stepping LWP A. */
2005
2006 /* Resume the thread. It should halt immediately, returning the
2007 pending SIGSTOP. */
2008 registers_changed ();
2009 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2010 lp->step, TARGET_SIGNAL_0);
2011 if (debug_linux_nat)
2012 fprintf_unfiltered (gdb_stdlog,
2013 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2014 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2015 target_pid_to_str (lp->ptid));
2016 lp->stopped = 0;
2017 gdb_assert (lp->resumed);
2018
2019 /* This should catch the pending SIGSTOP. */
2020 stop_wait_callback (lp, NULL);
2021 }
2022
2023 set_sigint_trap (); /* Causes SIGINT to be passed on to the
2024 attached process. */
2025 set_sigio_trap ();
2026
2027 while (status == 0)
2028 {
2029 pid_t lwpid;
2030
2031 lwpid = my_waitpid (pid, &status, options);
2032 if (lwpid > 0)
2033 {
2034 gdb_assert (pid == -1 || lwpid == pid);
2035
2036 if (debug_linux_nat)
2037 {
2038 fprintf_unfiltered (gdb_stdlog,
2039 "LLW: waitpid %ld received %s\n",
2040 (long) lwpid, status_to_str (status));
2041 }
2042
2043 lp = find_lwp_pid (pid_to_ptid (lwpid));
2044
2045 /* Check for stop events reported by a process we didn't
2046 already know about - anything not already in our LWP
2047 list.
2048
2049 If we're expecting to receive stopped processes after
2050 fork, vfork, and clone events, then we'll just add the
2051 new one to our list and go back to waiting for the event
2052 to be reported - the stopped process might be returned
2053 from waitpid before or after the event is. */
2054 if (WIFSTOPPED (status) && !lp)
2055 {
2056 linux_record_stopped_pid (lwpid);
2057 status = 0;
2058 continue;
2059 }
2060
2061 /* Make sure we don't report an event for the exit of an LWP not in
2062 our list, i.e. not part of the current process. This can happen
2063 if we detach from a program we originally forked and then it
2064 exits. */
2065 if (!WIFSTOPPED (status) && !lp)
2066 {
2067 status = 0;
2068 continue;
2069 }
2070
2071 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2072 CLONE_PTRACE processes which do not use the thread library -
2073 otherwise we wouldn't find the new LWP this way. That doesn't
2074 currently work, and the following code is currently unreachable
2075 due to the two blocks above. If it's fixed some day, this code
2076 should be broken out into a function so that we can also pick up
2077 LWPs from the new interface. */
2078 if (!lp)
2079 {
2080 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2081 if (options & __WCLONE)
2082 lp->cloned = 1;
2083
2084 if (threaded)
2085 {
2086 gdb_assert (WIFSTOPPED (status)
2087 && WSTOPSIG (status) == SIGSTOP);
2088 lp->signalled = 1;
2089
2090 if (!in_thread_list (inferior_ptid))
2091 {
2092 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2093 GET_PID (inferior_ptid));
2094 add_thread (inferior_ptid);
2095 }
2096
2097 add_thread (lp->ptid);
2098 printf_unfiltered (_("[New %s]\n"),
2099 target_pid_to_str (lp->ptid));
2100 }
2101 }
2102
2103 /* Handle GNU/Linux's extended waitstatus for trace events. */
2104 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2105 {
2106 if (debug_linux_nat)
2107 fprintf_unfiltered (gdb_stdlog,
2108 "LLW: Handling extended status 0x%06x\n",
2109 status);
2110 if (linux_nat_handle_extended (lp, status))
2111 {
2112 status = 0;
2113 continue;
2114 }
2115 }
2116
2117 /* Check if the thread has exited. */
2118 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2119 {
2120 if (in_thread_list (lp->ptid))
2121 {
2122 /* Core GDB cannot deal with us deleting the current
2123 thread. */
2124 if (!ptid_equal (lp->ptid, inferior_ptid))
2125 delete_thread (lp->ptid);
2126 printf_unfiltered (_("[%s exited]\n"),
2127 target_pid_to_str (lp->ptid));
2128 }
2129
2130 /* If this is the main thread, we must stop all threads and
2131 verify whether they are still alive. This is because in the NPTL
2132 thread model, no signal is issued for exiting LWPs
2133 other than the main thread. We only get the main thread
2134 exit signal once all child threads have already exited.
2135 If we stop all the threads and use the stop_wait_callback
2136 to check if they have exited we can determine whether this
2137 signal should be ignored or whether it means the end of the
2138 debugged application, regardless of which threading model
2139 is being used. */
2140 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2141 {
2142 lp->stopped = 1;
2143 iterate_over_lwps (stop_and_resume_callback, NULL);
2144 }
2145
2146 if (debug_linux_nat)
2147 fprintf_unfiltered (gdb_stdlog,
2148 "LLW: %s exited.\n",
2149 target_pid_to_str (lp->ptid));
2150
2151 delete_lwp (lp->ptid);
2152
2153 /* If there is at least one more LWP, then the exit signal
2154 was not the end of the debugged application and should be
2155 ignored. */
2156 if (num_lwps > 0)
2157 {
2158 /* Make sure there is at least one thread running. */
2159 gdb_assert (iterate_over_lwps (running_callback, NULL));
2160
2161 /* Discard the event. */
2162 status = 0;
2163 continue;
2164 }
2165 }
2166
2167 /* Check if the current LWP has previously exited. In the NPTL
2168 thread model, LWPs other than the main thread do not issue
2169 signals when they exit, so we must check whenever the thread
2170 has stopped. A similar check is made in stop_wait_callback(). */
2171 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2172 {
2173 if (in_thread_list (lp->ptid))
2174 {
2175 /* Core GDB cannot deal with us deleting the current
2176 thread. */
2177 if (!ptid_equal (lp->ptid, inferior_ptid))
2178 delete_thread (lp->ptid);
2179 printf_unfiltered (_("[%s exited]\n"),
2180 target_pid_to_str (lp->ptid));
2181 }
2182 if (debug_linux_nat)
2183 fprintf_unfiltered (gdb_stdlog,
2184 "LLW: %s exited.\n",
2185 target_pid_to_str (lp->ptid));
2186
2187 delete_lwp (lp->ptid);
2188
2189 /* Make sure there is at least one thread running. */
2190 gdb_assert (iterate_over_lwps (running_callback, NULL));
2191
2192 /* Discard the event. */
2193 status = 0;
2194 continue;
2195 }
2196
2197 /* Make sure we don't report a SIGSTOP that we sent
2198 ourselves in an attempt to stop an LWP. */
2199 if (lp->signalled
2200 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2201 {
2202 if (debug_linux_nat)
2203 fprintf_unfiltered (gdb_stdlog,
2204 "LLW: Delayed SIGSTOP caught for %s.\n",
2205 target_pid_to_str (lp->ptid));
2206
2207 /* This is a delayed SIGSTOP. */
2208 lp->signalled = 0;
2209
2210 registers_changed ();
2211 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2212 lp->step, TARGET_SIGNAL_0);
2213 if (debug_linux_nat)
2214 fprintf_unfiltered (gdb_stdlog,
2215 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2216 lp->step ?
2217 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2218 target_pid_to_str (lp->ptid));
2219
2220 lp->stopped = 0;
2221 gdb_assert (lp->resumed);
2222
2223 /* Discard the event. */
2224 status = 0;
2225 continue;
2226 }
2227
2228 break;
2229 }
2230
2231 if (pid == -1)
2232 {
2233 /* Alternate between checking cloned and uncloned processes. */
2234 options ^= __WCLONE;
2235
2236 /* And suspend every time we have checked both. */
2237 if (options & __WCLONE)
2238 sigsuspend (&suspend_mask);
2239 }
2240
2241 /* We shouldn't end up here unless we want to try again. */
2242 gdb_assert (status == 0);
2243 }
2244
2245 clear_sigio_trap ();
2246 clear_sigint_trap ();
2247
2248 gdb_assert (lp);
2249
2250 /* Don't report signals that GDB isn't interested in, such as
2251 signals that are neither printed nor stopped upon. Stopping all
2252 threads can be a bit time-consuming, so if we want decent
2253 performance with heavily multi-threaded programs, especially when
2254 they're using a high frequency timer, we'd better avoid it if we
2255 can. */
2256
2257 if (WIFSTOPPED (status))
2258 {
2259 int signo = target_signal_from_host (WSTOPSIG (status));
2260
2261 if (signal_stop_state (signo) == 0
2262 && signal_print_state (signo) == 0
2263 && signal_pass_state (signo) == 1)
2264 {
2265 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2266 here? It is not clear we should. GDB may not expect
2267 other threads to run. On the other hand, not resuming
2268 newly attached threads may cause an unwanted delay in
2269 getting them running. */
2270 registers_changed ();
2271 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2272 lp->step, signo);
2273 if (debug_linux_nat)
2274 fprintf_unfiltered (gdb_stdlog,
2275 "LLW: %s %s, %s (preempt 'handle')\n",
2276 lp->step ?
2277 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2278 target_pid_to_str (lp->ptid),
2279 signo ? strsignal (signo) : "0");
2280 lp->stopped = 0;
2281 status = 0;
2282 goto retry;
2283 }
2284
2285 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2286 {
2287 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2288 forwarded to the entire process group, that is, all LWP's
2289 will receive it. Since we only want to report it once,
2290 we try to flush it from all LWPs except this one. */
2291 sigaddset (&flush_mask, SIGINT);
2292 }
2293 }
2294
2295 /* This LWP is stopped now. */
2296 lp->stopped = 1;
2297
2298 if (debug_linux_nat)
2299 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2300 status_to_str (status), target_pid_to_str (lp->ptid));
2301
2302 /* Now stop all other LWP's ... */
2303 iterate_over_lwps (stop_callback, NULL);
2304
2305 /* ... and wait until all of them have reported back that they're no
2306 longer running. */
2307 iterate_over_lwps (stop_wait_callback, &flush_mask);
2308 iterate_over_lwps (flush_callback, &flush_mask);
2309
2310 /* If we're not waiting for a specific LWP, choose an event LWP from
2311 among those that have had events. Giving equal priority to all
2312 LWPs that have had events helps prevent starvation. */
2313 if (pid == -1)
2314 select_event_lwp (&lp, &status);
2315
2316 /* Now that we've selected our final event LWP, cancel any
2317 breakpoints in other LWPs that have hit a GDB breakpoint. See
2318 the comment in cancel_breakpoints_callback to find out why. */
2319 iterate_over_lwps (cancel_breakpoints_callback, lp);
2320
2321 /* If we're not running in "threaded" mode, we'll report the bare
2322 process id. */
2323
2324 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2325 {
2326 trap_ptid = (threaded ? lp->ptid : pid_to_ptid (GET_LWP (lp->ptid)));
2327 if (debug_linux_nat)
2328 fprintf_unfiltered (gdb_stdlog,
2329 "LLW: trap_ptid is %s.\n",
2330 target_pid_to_str (trap_ptid));
2331 }
2332 else
2333 trap_ptid = null_ptid;
2334
2335 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2336 {
2337 *ourstatus = lp->waitstatus;
2338 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2339 }
2340 else
2341 store_waitstatus (ourstatus, status);
2342
2343 return (threaded ? lp->ptid : pid_to_ptid (GET_LWP (lp->ptid)));
2344 }
2345
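/* The raw waitpid statuses juggled throughout linux_nat_wait fall into
   three classes, which the standard <sys/wait.h> macros tell apart.  A
   self-contained sketch of that classification (the function below is
   hypothetical; store_waitstatus does the equivalent mapping onto
   TARGET_WAITKIND_* values):  */
#if 0   /* Illustrative sketch only.  */
#include <stdio.h>
#include <sys/wait.h>

static void
classify_wait_status (int status)
{
  if (WIFSTOPPED (status))
    printf ("stopped by signal %d\n", WSTOPSIG (status));
  else if (WIFEXITED (status))
    printf ("exited with code %d\n", WEXITSTATUS (status));
  else if (WIFSIGNALED (status))
    printf ("killed by signal %d\n", WTERMSIG (status));
}
#endif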
2346 static int
2347 kill_callback (struct lwp_info *lp, void *data)
2348 {
2349 errno = 0;
2350 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2351 if (debug_linux_nat)
2352 fprintf_unfiltered (gdb_stdlog,
2353 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2354 target_pid_to_str (lp->ptid),
2355 errno ? safe_strerror (errno) : "OK");
2356
2357 return 0;
2358 }
2359
2360 static int
2361 kill_wait_callback (struct lwp_info *lp, void *data)
2362 {
2363 pid_t pid;
2364
2365 /* We must make sure that there are no pending events (delayed
2366 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
2367 program doesn't interfere with any following debugging session. */
2368
2369 /* For cloned processes we must check both with __WCLONE and
2370 without, since the exit status of a cloned process isn't reported
2371 with __WCLONE. */
2372 if (lp->cloned)
2373 {
2374 do
2375 {
2376 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
2377 if (pid != (pid_t) -1 && debug_linux_nat)
2378 {
2379 fprintf_unfiltered (gdb_stdlog,
2380 "KWC: wait %s received unknown.\n",
2381 target_pid_to_str (lp->ptid));
2382 }
2383 }
2384 while (pid == GET_LWP (lp->ptid));
2385
2386 gdb_assert (pid == -1 && errno == ECHILD);
2387 }
2388
2389 do
2390 {
2391 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
2392 if (pid != (pid_t) -1 && debug_linux_nat)
2393 {
2394 fprintf_unfiltered (gdb_stdlog,
2395 "KWC: wait %s received unknown.\n",
2396 target_pid_to_str (lp->ptid));
2397 }
2398 }
2399 while (pid == GET_LWP (lp->ptid));
2400
2401 gdb_assert (pid == -1 && errno == ECHILD);
2402 return 0;
2403 }
2404
2405 static void
2406 linux_nat_kill (void)
2407 {
2408 /* Kill all LWP's ... */
2409 iterate_over_lwps (kill_callback, NULL);
2410
2411 /* ... and wait until we've flushed all events. */
2412 iterate_over_lwps (kill_wait_callback, NULL);
2413
2414 target_mourn_inferior ();
2415 }
2416
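/* The kill path above is a two-phase "signal everything, then drain
   every pending event" sequence.  A rough sketch of the same idea for
   a single traced child (the helper is hypothetical; kill_wait_callback
   additionally retries with __WCLONE, since a cloned LWP's exit status
   is only reported with that flag):  */
#if 0   /* Illustrative sketch only.  */
static void
kill_and_reap (pid_t pid)
{
  ptrace (PTRACE_KILL, pid, 0, 0);

  /* Consume the exit status and anything queued before it, so that
     stale SIGSTOP/SIGTRAP reports cannot leak into a later session.  */
  while (waitpid (pid, NULL, 0) == pid)
    ;
}
#endif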
2417 static void
2418 linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
2419 int from_tty)
2420 {
2421 linux_ops->to_create_inferior (exec_file, allargs, env, from_tty);
2422 }
2423
2424 static void
2425 linux_nat_mourn_inferior (void)
2426 {
2427 trap_ptid = null_ptid;
2428
2429 /* Destroy LWP info; it's no longer valid. */
2430 init_lwp_list ();
2431
2432 /* Restore the original signal mask. */
2433 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
2434 sigemptyset (&blocked_mask);
2435
2436 linux_ops->to_mourn_inferior ();
2437 }
2438
2439 static LONGEST
2440 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
2441 const char *annex, gdb_byte *readbuf,
2442 const gdb_byte *writebuf,
2443 ULONGEST offset, LONGEST len)
2444 {
2445 struct cleanup *old_chain = save_inferior_ptid ();
2446 LONGEST xfer;
2447
2448 if (is_lwp (inferior_ptid))
2449 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2450
2451 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
2452 offset, len);
2453
2454 do_cleanups (old_chain);
2455 return xfer;
2456 }
2457
2458 static int
2459 linux_nat_thread_alive (ptid_t ptid)
2460 {
2461 gdb_assert (is_lwp (ptid));
2462
2463 errno = 0;
2464 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
2465 if (debug_linux_nat)
2466 fprintf_unfiltered (gdb_stdlog,
2467 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2468 target_pid_to_str (ptid),
2469 errno ? safe_strerror (errno) : "OK");
2470 if (errno)
2471 return 0;
2472
2473 return 1;
2474 }
2475
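/* Condensed form of the liveness probe above: PTRACE_PEEKUSER has no
   side effects, so only errno matters.  (Sketch; the helper name is
   made up.)  */
#if 0   /* Illustrative sketch only.  */
static int
lwp_is_alive (pid_t lwp)
{
  errno = 0;
  ptrace (PTRACE_PEEKUSER, lwp, 0, 0);
  return errno == 0;            /* ESRCH (or any error) => gone.  */
}
#endif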
2476 static char *
2477 linux_nat_pid_to_str (ptid_t ptid)
2478 {
2479 static char buf[64];
2480
2481 if (is_lwp (ptid))
2482 {
2483 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2484 return buf;
2485 }
2486
2487 return normal_pid_to_str (ptid);
2488 }
2489
2490 static void
2491 linux_nat_fetch_registers (int regnum)
2492 {
2493 /* to_fetch_registers will honor the LWP ID, so we can use it directly. */
2494 linux_ops->to_fetch_registers (regnum);
2495 }
2496
2497 static void
2498 linux_nat_store_registers (int regnum)
2499 {
2500 /* to_store_registers will honor the LWP ID, so we can use it directly. */
2501 linux_ops->to_store_registers (regnum);
2502 }
2503
2504 static void
2505 linux_nat_child_post_startup_inferior (ptid_t ptid)
2506 {
2507 linux_ops->to_post_startup_inferior (ptid);
2508 }
2509
2510 static void
2511 init_linux_nat_ops (void)
2512 {
2513 #if 0
2514 linux_nat_ops.to_open = linux_nat_open;
2515 #endif
2516 linux_nat_ops.to_shortname = "lwp-layer";
2517 linux_nat_ops.to_longname = "lwp-layer";
2518 linux_nat_ops.to_doc = "Low level threads support (LWP layer)";
2519 linux_nat_ops.to_attach = linux_nat_attach;
2520 linux_nat_ops.to_detach = linux_nat_detach;
2521 linux_nat_ops.to_resume = linux_nat_resume;
2522 linux_nat_ops.to_wait = linux_nat_wait;
2523 linux_nat_ops.to_fetch_registers = linux_nat_fetch_registers;
2524 linux_nat_ops.to_store_registers = linux_nat_store_registers;
2525 linux_nat_ops.to_xfer_partial = linux_nat_xfer_partial;
2526 linux_nat_ops.to_kill = linux_nat_kill;
2527 linux_nat_ops.to_create_inferior = linux_nat_create_inferior;
2528 linux_nat_ops.to_mourn_inferior = linux_nat_mourn_inferior;
2529 linux_nat_ops.to_thread_alive = linux_nat_thread_alive;
2530 linux_nat_ops.to_pid_to_str = linux_nat_pid_to_str;
2531 linux_nat_ops.to_post_startup_inferior
2532 = linux_nat_child_post_startup_inferior;
2533 linux_nat_ops.to_post_attach = child_post_attach;
2534 linux_nat_ops.to_insert_fork_catchpoint = child_insert_fork_catchpoint;
2535 linux_nat_ops.to_insert_vfork_catchpoint = child_insert_vfork_catchpoint;
2536 linux_nat_ops.to_insert_exec_catchpoint = child_insert_exec_catchpoint;
2537
2538 linux_nat_ops.to_stratum = thread_stratum;
2539 linux_nat_ops.to_has_thread_control = tc_schedlock;
2540 linux_nat_ops.to_magic = OPS_MAGIC;
2541 }
2542
2543 static void
2544 sigchld_handler (int signo)
2545 {
2546 /* Do nothing. The only reason for this handler is that it allows
2547 us to use sigsuspend in linux_nat_wait above to wait for the
2548 arrival of a SIGCHLD. */
2549 }
2550
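/* The idiom the comment above alludes to: SIGCHLD stays blocked while
   the wait loop decides whether it needs to sleep, and the sleep is a
   sigsuspend whose mask unblocks SIGCHLD.  A SIGCHLD that arrives
   between the WNOHANG poll and the sigsuspend is therefore left
   pending and wakes the sigsuspend immediately instead of being lost.
   A minimal sketch, assuming a traced child already exists:  */
#if 0   /* Illustrative sketch only.  */
static void
wait_for_sigchld_sketch (void)
{
  sigset_t block, suspend;
  int status;

  sigemptyset (&block);
  sigaddset (&block, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block, NULL);     /* Keep SIGCHLD blocked...  */

  sigprocmask (SIG_SETMASK, NULL, &suspend);
  sigdelset (&suspend, SIGCHLD);             /* ... except while asleep.  */

  while (waitpid (-1, &status, WNOHANG) == 0)
    sigsuspend (&suspend);                   /* Wake on SIGCHLD.  */
}
#endif
/* The handler itself can be empty; it only has to exist so that a
   SIGCHLD interrupts sigsuspend instead of being discarded outright,
   SIGCHLD's default disposition being to ignore the signal.  */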
2551 /* Accepts an integer PID; returns a string representing a file that
2552 can be opened to get the symbols for the child process. */
2553
2554 char *
2555 child_pid_to_exec_file (int pid)
2556 {
2557 char *name1, *name2;
2558
2559 name1 = xmalloc (MAXPATHLEN);
2560 name2 = xmalloc (MAXPATHLEN);
2561 make_cleanup (xfree, name1);
2562 make_cleanup (xfree, name2);
2563 memset (name2, 0, MAXPATHLEN);
2564
2565 sprintf (name1, "/proc/%d/exe", pid);
2566 if (readlink (name1, name2, MAXPATHLEN) > 0)
2567 return name2;
2568 else
2569 return name1;
2570 }
2571
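/* A self-contained version of the /proc/PID/exe lookup above, with
   readlink's quirks spelled out.  (Sketch only; the helper is
   hypothetical.  child_pid_to_exec_file gets the same effect by
   zeroing NAME2 before calling readlink.)  */
#if 0   /* Illustrative sketch only.  */
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int
proc_exe_path (pid_t pid, char *buf, size_t bufsize)
{
  char link[64];
  ssize_t len;

  snprintf (link, sizeof (link), "/proc/%d/exe", (int) pid);
  len = readlink (link, buf, bufsize - 1);
  if (len < 0)
    return -1;                  /* No such process, or not readable.  */

  buf[len] = '\0';              /* readlink does not NUL-terminate.  */
  return 0;
}
#endif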
2572 /* Service function for corefiles and info proc. */
2573
2574 static int
2575 read_mapping (FILE *mapfile,
2576 long long *addr,
2577 long long *endaddr,
2578 char *permissions,
2579 long long *offset,
2580 char *device, long long *inode, char *filename)
2581 {
2582 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
2583 addr, endaddr, permissions, offset, device, inode);
2584
2585 filename[0] = '\0';
2586 if (ret > 0 && ret != EOF)
2587 {
2588 /* Eat everything up to EOL for the filename. This will prevent
2589 weird filenames (such as one with embedded whitespace) from
2590 confusing this code. It also makes this code more robust in
2591 respect to annotations the kernel may add after the filename.
2592
2593 Note the filename is used for informational purposes
2594 only. */
2595 ret += fscanf (mapfile, "%[^\n]\n", filename);
2596 }
2597
2598 return (ret != 0 && ret != EOF);
2599 }
2600
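/* The scan in read_mapping corresponds to /proc/PID/maps lines such as
   "08048000-0804c000 r-xp 00000000 03:01 12345  /bin/cat".  A
   standalone sketch of the same parse, with bounded string conversions
   (note that /proc prints the inode in decimal; the %llx conversion
   mirrors the code above, where the value is informational only):  */
#if 0   /* Illustrative sketch only.  */
#include <stdio.h>

static int
parse_maps_line (const char *line)
{
  unsigned long long addr, endaddr, offset, inode;
  char perms[8], device[8];

  if (sscanf (line, "%llx-%llx %7s %llx %7s %llx",
              &addr, &endaddr, perms, &offset, device, &inode) != 6)
    return -1;

  printf ("%llx..%llx %s\n", addr, endaddr, perms);
  return 0;
}
#endif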
2601 /* Fills the "to_find_memory_regions" target vector. Lists the memory
2602 regions in the inferior for a corefile. */
2603
2604 static int
2605 linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
2606 unsigned long,
2607 int, int, int, void *), void *obfd)
2608 {
2609 long long pid = PIDGET (inferior_ptid);
2610 char mapsfilename[MAXPATHLEN];
2611 FILE *mapsfile;
2612 long long addr, endaddr, size, offset, inode;
2613 char permissions[8], device[8], filename[MAXPATHLEN];
2614 int read, write, exec;
2615 int ret;
2616
2617 /* Compose the filename for the /proc memory map, and open it. */
2618 sprintf (mapsfilename, "/proc/%lld/maps", pid);
2619 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
2620 error (_("Could not open %s."), mapsfilename);
2621
2622 if (info_verbose)
2623 fprintf_filtered (gdb_stdout,
2624 "Reading memory regions from %s\n", mapsfilename);
2625
2626 /* Now iterate until end-of-file. */
2627 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
2628 &offset, &device[0], &inode, &filename[0]))
2629 {
2630 size = endaddr - addr;
2631
2632 /* Get the segment's permissions. */
2633 read = (strchr (permissions, 'r') != 0);
2634 write = (strchr (permissions, 'w') != 0);
2635 exec = (strchr (permissions, 'x') != 0);
2636
2637 if (info_verbose)
2638 {
2639 fprintf_filtered (gdb_stdout,
2640 "Save segment, %lld bytes at 0x%s (%c%c%c)",
2641 size, paddr_nz (addr),
2642 read ? 'r' : ' ',
2643 write ? 'w' : ' ', exec ? 'x' : ' ');
2644 if (filename[0])
2645 fprintf_filtered (gdb_stdout, " for %s", filename);
2646 fprintf_filtered (gdb_stdout, "\n");
2647 }
2648
2649 /* Invoke the callback function to create the corefile
2650 segment. */
2651 func (addr, size, read, write, exec, obfd);
2652 }
2653 fclose (mapsfile);
2654 return 0;
2655 }
2656
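/* The FUNC argument above receives one call per mapping.  A sketch of
   a callback in the expected shape (hypothetical; the real caller
   passes the corefile BFD as the opaque DATA pointer and ignores the
   callback's return value):  */
#if 0   /* Illustrative sketch only.  */
static int
count_writable_bytes (CORE_ADDR addr, unsigned long size,
                      int read, int write, int exec, void *data)
{
  unsigned long *total = data;

  if (write)
    *total += size;
  return 0;
}
#endif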
2657 /* Records the thread's register state for the corefile note
2658 section. */
2659
2660 static char *
2661 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2662 char *note_data, int *note_size)
2663 {
2664 gdb_gregset_t gregs;
2665 gdb_fpregset_t fpregs;
2666 #ifdef FILL_FPXREGSET
2667 gdb_fpxregset_t fpxregs;
2668 #endif
2669 unsigned long lwp = ptid_get_lwp (ptid);
2670
2671 fill_gregset (&gregs, -1);
2672 note_data = (char *) elfcore_write_prstatus (obfd,
2673 note_data,
2674 note_size,
2675 lwp,
2676 stop_signal, &gregs);
2677
2678 fill_fpregset (&fpregs, -1);
2679 note_data = (char *) elfcore_write_prfpreg (obfd,
2680 note_data,
2681 note_size,
2682 &fpregs, sizeof (fpregs));
2683 #ifdef FILL_FPXREGSET
2684 fill_fpxregset (&fpxregs, -1);
2685 note_data = (char *) elfcore_write_prxfpreg (obfd,
2686 note_data,
2687 note_size,
2688 &fpxregs, sizeof (fpxregs));
2689 #endif
2690 return note_data;
2691 }
2692
2693 struct linux_nat_corefile_thread_data
2694 {
2695 bfd *obfd;
2696 char *note_data;
2697 int *note_size;
2698 int num_notes;
2699 };
2700
2701 /* Called by gdbthread.c once per thread. Records the thread's
2702 register state for the corefile note section. */
2703
2704 static int
2705 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
2706 {
2707 struct linux_nat_corefile_thread_data *args = data;
2708 ptid_t saved_ptid = inferior_ptid;
2709
2710 inferior_ptid = ti->ptid;
2711 registers_changed ();
2712 target_fetch_registers (-1); /* FIXME should not be necessary;
2713 fill_gregset should do it automatically. */
2714 args->note_data = linux_nat_do_thread_registers (args->obfd,
2715 ti->ptid,
2716 args->note_data,
2717 args->note_size);
2718 args->num_notes++;
2719 inferior_ptid = saved_ptid;
2720 registers_changed ();
2721 target_fetch_registers (-1); /* FIXME should not be necessary;
2722 fill_gregset should do it automatically. */
2723 return 0;
2724 }
2725
2726 /* Records the register state for the corefile note section. */
2727
2728 static char *
2729 linux_nat_do_registers (bfd *obfd, ptid_t ptid,
2730 char *note_data, int *note_size)
2731 {
2732 registers_changed ();
2733 target_fetch_registers (-1); /* FIXME should not be necessary;
2734 fill_gregset should do it automatically. */
2735 return linux_nat_do_thread_registers (obfd,
2736 ptid_build (ptid_get_pid (inferior_ptid),
2737 ptid_get_pid (inferior_ptid),
2738 0),
2739 note_data, note_size);
2741 }
2742
2743 /* Fills the "to_make_corefile_note" target vector. Builds the note
2744 section for a corefile, and returns it in a malloc buffer. */
2745
2746 static char *
2747 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
2748 {
2749 struct linux_nat_corefile_thread_data thread_args;
2750 struct cleanup *old_chain;
2751 char fname[16] = { '\0' };
2752 char psargs[80] = { '\0' };
2753 char *note_data = NULL;
2754 ptid_t current_ptid = inferior_ptid;
2755 gdb_byte *auxv;
2756 int auxv_len;
2757
2758 if (get_exec_file (0))
2759 {
2760 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
2761 strncpy (psargs, get_exec_file (0), sizeof (psargs));
2762 if (get_inferior_args ())
2763 {
2764 strncat (psargs, " ", sizeof (psargs) - strlen (psargs));
2765 strncat (psargs, get_inferior_args (),
2766 sizeof (psargs) - strlen (psargs));
2767 }
2768 note_data = (char *) elfcore_write_prpsinfo (obfd,
2769 note_data,
2770 note_size, fname, psargs);
2771 }
2772
2773 /* Dump information for threads. */
2774 thread_args.obfd = obfd;
2775 thread_args.note_data = note_data;
2776 thread_args.note_size = note_size;
2777 thread_args.num_notes = 0;
2778 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
2779 if (thread_args.num_notes == 0)
2780 {
2781 /* iterate_over_lwps didn't come up with any LWPs; just
2782 use inferior_ptid. */
2783 note_data = linux_nat_do_registers (obfd, inferior_ptid,
2784 note_data, note_size);
2785 }
2786 else
2787 {
2788 note_data = thread_args.note_data;
2789 }
2790
2791 auxv_len = target_auxv_read (&current_target, &auxv);
2792 if (auxv_len > 0)
2793 {
2794 note_data = elfcore_write_note (obfd, note_data, note_size,
2795 "CORE", NT_AUXV, auxv, auxv_len);
2796 xfree (auxv);
2797 }
2798
2799 make_cleanup (xfree, note_data);
2800 return note_data;
2801 }
2802
2803 /* Implement the "info proc" command. */
2804
2805 static void
2806 linux_nat_info_proc_cmd (char *args, int from_tty)
2807 {
2808 long long pid = PIDGET (inferior_ptid);
2809 FILE *procfile;
2810 char **argv = NULL;
2811 char buffer[MAXPATHLEN];
2812 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
2813 int cmdline_f = 1;
2814 int cwd_f = 1;
2815 int exe_f = 1;
2816 int mappings_f = 0;
2817 int environ_f = 0;
2818 int status_f = 0;
2819 int stat_f = 0;
2820 int all = 0;
2821 struct stat dummy;
2822
2823 if (args)
2824 {
2825 /* Break up 'args' into an argv array. */
2826 if ((argv = buildargv (args)) == NULL)
2827 nomem (0);
2828 else
2829 make_cleanup_freeargv (argv);
2830 }
2831 while (argv != NULL && *argv != NULL)
2832 {
2833 if (isdigit (argv[0][0]))
2834 {
2835 pid = strtoul (argv[0], NULL, 10);
2836 }
2837 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
2838 {
2839 mappings_f = 1;
2840 }
2841 else if (strcmp (argv[0], "status") == 0)
2842 {
2843 status_f = 1;
2844 }
2845 else if (strcmp (argv[0], "stat") == 0)
2846 {
2847 stat_f = 1;
2848 }
2849 else if (strcmp (argv[0], "cmd") == 0)
2850 {
2851 cmdline_f = 1;
2852 }
2853 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
2854 {
2855 exe_f = 1;
2856 }
2857 else if (strcmp (argv[0], "cwd") == 0)
2858 {
2859 cwd_f = 1;
2860 }
2861 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
2862 {
2863 all = 1;
2864 }
2865 else
2866 {
2867 /* [...] (future options here) */
2868 }
2869 argv++;
2870 }
2871 if (pid == 0)
2872 error (_("No current process: you must name one."));
2873
2874 sprintf (fname1, "/proc/%lld", pid);
2875 if (stat (fname1, &dummy) != 0)
2876 error (_("No /proc directory: '%s'"), fname1);
2877
2878 printf_filtered (_("process %lld\n"), pid);
2879 if (cmdline_f || all)
2880 {
2881 sprintf (fname1, "/proc/%lld/cmdline", pid);
2882 if ((procfile = fopen (fname1, "r")) != NULL)
2883 {
2884 fgets (buffer, sizeof (buffer), procfile);
2885 printf_filtered ("cmdline = '%s'\n", buffer);
2886 fclose (procfile);
2887 }
2888 else
2889 warning (_("unable to open /proc file '%s'"), fname1);
2890 }
2891 if (cwd_f || all)
2892 {
2893 sprintf (fname1, "/proc/%lld/cwd", pid);
2894 memset (fname2, 0, sizeof (fname2));
2895 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2896 printf_filtered ("cwd = '%s'\n", fname2);
2897 else
2898 warning (_("unable to read link '%s'"), fname1);
2899 }
2900 if (exe_f || all)
2901 {
2902 sprintf (fname1, "/proc/%lld/exe", pid);
2903 memset (fname2, 0, sizeof (fname2));
2904 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2905 printf_filtered ("exe = '%s'\n", fname2);
2906 else
2907 warning (_("unable to read link '%s'"), fname1);
2908 }
2909 if (mappings_f || all)
2910 {
2911 sprintf (fname1, "/proc/%lld/maps", pid);
2912 if ((procfile = fopen (fname1, "r")) != NULL)
2913 {
2914 long long addr, endaddr, size, offset, inode;
2915 char permissions[8], device[8], filename[MAXPATHLEN];
2916
2917 printf_filtered (_("Mapped address spaces:\n\n"));
2918 if (TARGET_ADDR_BIT == 32)
2919 {
2920 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
2921 "Start Addr",
2922 " End Addr",
2923 " Size", " Offset", "objfile");
2924 }
2925 else
2926 {
2927 printf_filtered (" %18s %18s %10s %10s %7s\n",
2928 "Start Addr",
2929 " End Addr",
2930 " Size", " Offset", "objfile");
2931 }
2932
2933 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
2934 &offset, &device[0], &inode, &filename[0]))
2935 {
2936 size = endaddr - addr;
2937
2938 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
2939 calls here (and possibly above) should be abstracted
2940 out into their own functions? Andrew suggests using
2941 a generic local_address_string instead to print out
2942 the addresses; that makes sense to me, too. */
2943
2944 if (TARGET_ADDR_BIT == 32)
2945 {
2946 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
2947 (unsigned long) addr, /* FIXME: pr_addr */
2948 (unsigned long) endaddr,
2949 (int) size,
2950 (unsigned int) offset,
2951 filename[0] ? filename : "");
2952 }
2953 else
2954 {
2955 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
2956 (unsigned long) addr, /* FIXME: pr_addr */
2957 (unsigned long) endaddr,
2958 (int) size,
2959 (unsigned int) offset,
2960 filename[0] ? filename : "");
2961 }
2962 }
2963
2964 fclose (procfile);
2965 }
2966 else
2967 warning (_("unable to open /proc file '%s'"), fname1);
2968 }
2969 if (status_f || all)
2970 {
2971 sprintf (fname1, "/proc/%lld/status", pid);
2972 if ((procfile = fopen (fname1, "r")) != NULL)
2973 {
2974 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2975 puts_filtered (buffer);
2976 fclose (procfile);
2977 }
2978 else
2979 warning (_("unable to open /proc file '%s'"), fname1);
2980 }
2981 if (stat_f || all)
2982 {
2983 sprintf (fname1, "/proc/%lld/stat", pid);
2984 if ((procfile = fopen (fname1, "r")) != NULL)
2985 {
2986 int itmp;
2987 char ctmp;
2988
2989 if (fscanf (procfile, "%d ", &itmp) > 0)
2990 printf_filtered (_("Process: %d\n"), itmp);
2991 if (fscanf (procfile, "%s ", &buffer[0]) > 0)
2992 printf_filtered (_("Exec file: %s\n"), buffer);
2993 if (fscanf (procfile, "%c ", &ctmp) > 0)
2994 printf_filtered (_("State: %c\n"), ctmp);
2995 if (fscanf (procfile, "%d ", &itmp) > 0)
2996 printf_filtered (_("Parent process: %d\n"), itmp);
2997 if (fscanf (procfile, "%d ", &itmp) > 0)
2998 printf_filtered (_("Process group: %d\n"), itmp);
2999 if (fscanf (procfile, "%d ", &itmp) > 0)
3000 printf_filtered (_("Session id: %d\n"), itmp);
3001 if (fscanf (procfile, "%d ", &itmp) > 0)
3002 printf_filtered (_("TTY: %d\n"), itmp);
3003 if (fscanf (procfile, "%d ", &itmp) > 0)
3004 printf_filtered (_("TTY owner process group: %d\n"), itmp);
3005 if (fscanf (procfile, "%u ", &itmp) > 0)
3006 printf_filtered (_("Flags: 0x%x\n"), itmp);
3007 if (fscanf (procfile, "%u ", &itmp) > 0)
3008 printf_filtered (_("Minor faults (no memory page): %u\n"),
3009 (unsigned int) itmp);
3010 if (fscanf (procfile, "%u ", &itmp) > 0)
3011 printf_filtered (_("Minor faults, children: %u\n"),
3012 (unsigned int) itmp);
3013 if (fscanf (procfile, "%u ", &itmp) > 0)
3014 printf_filtered (_("Major faults (memory page faults): %u\n"),
3015 (unsigned int) itmp);
3016 if (fscanf (procfile, "%u ", &itmp) > 0)
3017 printf_filtered (_("Major faults, children: %u\n"),
3018 (unsigned int) itmp);
3019 if (fscanf (procfile, "%d ", &itmp) > 0)
3020 printf_filtered ("utime: %d\n", itmp);
3021 if (fscanf (procfile, "%d ", &itmp) > 0)
3022 printf_filtered ("stime: %d\n", itmp);
3023 if (fscanf (procfile, "%d ", &itmp) > 0)
3024 printf_filtered ("utime, children: %d\n", itmp);
3025 if (fscanf (procfile, "%d ", &itmp) > 0)
3026 printf_filtered ("stime, children: %d\n", itmp);
3027 if (fscanf (procfile, "%d ", &itmp) > 0)
3028 printf_filtered (_("jiffies remaining in current time slice: %d\n"),
3029 itmp);
3030 if (fscanf (procfile, "%d ", &itmp) > 0)
3031 printf_filtered ("'nice' value: %d\n", itmp);
3032 if (fscanf (procfile, "%u ", &itmp) > 0)
3033 printf_filtered (_("jiffies until next timeout: %u\n"),
3034 (unsigned int) itmp);
3035 if (fscanf (procfile, "%u ", &itmp) > 0)
3036 printf_filtered ("jiffies until next SIGALRM: %u\n",
3037 (unsigned int) itmp);
3038 if (fscanf (procfile, "%d ", &itmp) > 0)
3039 printf_filtered (_("start time (jiffies since system boot): %d\n"),
3040 itmp);
3041 if (fscanf (procfile, "%u ", &itmp) > 0)
3042 printf_filtered (_("Virtual memory size: %u\n"),
3043 (unsigned int) itmp);
3044 if (fscanf (procfile, "%u ", &itmp) > 0)
3045 printf_filtered (_("Resident set size: %u\n"), (unsigned int) itmp);
3046 if (fscanf (procfile, "%u ", &itmp) > 0)
3047 printf_filtered ("rlim: %u\n", (unsigned int) itmp);
3048 if (fscanf (procfile, "%u ", &itmp) > 0)
3049 printf_filtered (_("Start of text: 0x%x\n"), itmp);
3050 if (fscanf (procfile, "%u ", &itmp) > 0)
3051 printf_filtered (_("End of text: 0x%x\n"), itmp);
3052 if (fscanf (procfile, "%u ", &itmp) > 0)
3053 printf_filtered (_("Start of stack: 0x%x\n"), itmp);
3054 #if 0 /* Don't know how architecture-dependent the rest is...
3055 Anyway the signal bitmap info is available from "status". */
3056 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
3057 printf_filtered (_("Kernel stack pointer: 0x%x\n"), itmp);
3058 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
3059 printf_filtered (_("Kernel instr pointer: 0x%x\n"), itmp);
3060 if (fscanf (procfile, "%d ", &itmp) > 0)
3061 printf_filtered (_("Pending signals bitmap: 0x%x\n"), itmp);
3062 if (fscanf (procfile, "%d ", &itmp) > 0)
3063 printf_filtered (_("Blocked signals bitmap: 0x%x\n"), itmp);
3064 if (fscanf (procfile, "%d ", &itmp) > 0)
3065 printf_filtered (_("Ignored signals bitmap: 0x%x\n"), itmp);
3066 if (fscanf (procfile, "%d ", &itmp) > 0)
3067 printf_filtered (_("Caught signals bitmap: 0x%x\n"), itmp);
3068 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
3069 printf_filtered (_("wchan (system call): 0x%x\n"), itmp);
3070 #endif
3071 fclose (procfile);
3072 }
3073 else
3074 warning (_("unable to open /proc file '%s'"), fname1);
3075 }
3076 }
3077
3078 /* Implement the to_xfer_partial interface for memory reads using the /proc
3079 filesystem. Because we can use a single read() call for /proc, this
3080 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3081 but it doesn't support writes. */
3082
3083 static LONGEST
3084 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3085 const char *annex, gdb_byte *readbuf,
3086 const gdb_byte *writebuf,
3087 ULONGEST offset, LONGEST len)
3088 {
3089 LONGEST ret;
3090 int fd;
3091 char filename[64];
3092
3093 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3094 return 0;
3095
3096 /* Don't bother for transfers of just a few words; ptrace is cheaper. */
3097 if (len < 3 * sizeof (long))
3098 return 0;
3099
3100 /* We could keep this file open and cache it - possibly one per
3101 thread. That requires some juggling, but is even faster. */
3102 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3103 fd = open (filename, O_RDONLY | O_LARGEFILE);
3104 if (fd == -1)
3105 return 0;
3106
3107 /* If pread64 is available, use it. It's faster if the kernel
3108 supports it (only one syscall), and it's 64-bit safe even on
3109 32-bit platforms (for instance, SPARC debugging a SPARC64
3110 application). */
3111 #ifdef HAVE_PREAD64
3112 if (pread64 (fd, readbuf, len, offset) != len)
3113 #else
3114 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
3115 #endif
3116 ret = 0;
3117 else
3118 ret = len;
3119
3120 close (fd);
3121 return ret;
3122 }
3123
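/* Reading inferior memory through /proc/PID/mem boils down to seeking
   to the virtual address and reading.  A condensed sketch of the
   non-pread64 fallback path above (hypothetical helper; on 32-bit
   hosts the lseek offset may not cover a 64-bit inferior, which is
   exactly why pread64 is preferred when available):  */
#if 0   /* Illustrative sketch only.  */
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t
read_inferior_mem (pid_t pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t nread = -1;

  snprintf (filename, sizeof (filename), "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  if (lseek (fd, (off_t) addr, SEEK_SET) != (off_t) -1)
    nread = read (fd, buf, len);

  close (fd);
  /* Typically fails unless the caller is ptracing PID and PID is
     stopped.  */
  return nread;
}
#endif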
3124 /* Parse LINE as a signal set and add its set bits to SIGS. */
3125
3126 static void
3127 add_line_to_sigset (const char *line, sigset_t *sigs)
3128 {
3129 int len = strlen (line) - 1;
3130 const char *p;
3131 int signum;
3132
3133 if (len < 0 || line[len] != '\n')
3134 error (_("Could not parse signal set: %s"), line);
3135
3136 p = line;
3137 signum = len * 4;
3138 while (len-- > 0)
3139 {
3140 int digit;
3141
3142 if (*p >= '0' && *p <= '9')
3143 digit = *p - '0';
3144 else if (*p >= 'a' && *p <= 'f')
3145 digit = *p - 'a' + 10;
3146 else
3147 error (_("Could not parse signal set: %s"), line);
3148
3149 signum -= 4;
3150
3151 if (digit & 1)
3152 sigaddset (sigs, signum + 1);
3153 if (digit & 2)
3154 sigaddset (sigs, signum + 2);
3155 if (digit & 4)
3156 sigaddset (sigs, signum + 3);
3157 if (digit & 8)
3158 sigaddset (sigs, signum + 4);
3159
3160 p++;
3161 }
3162 }
3163
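/* Worked example of the decoding above: in the status-file value
   "0000000000000102", the rightmost digit '2' has bit 1 set and the
   third digit from the right is '1' (bit 0).  By the time the loop
   reaches those digits, SIGNUM has counted down to 0 and 8
   respectively, so the set bits decode to signal 2 (SIGINT) and
   signal 9 (SIGKILL):  */
#if 0   /* Illustrative sketch only.  */
static void
sigset_line_example (void)
{
  sigset_t set;

  sigemptyset (&set);
  add_line_to_sigset ("0000000000000102\n", &set);

  /* sigismember (&set, SIGINT) and sigismember (&set, SIGKILL) are
     now both non-zero.  */
}
#endif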
3164 /* Read process PID's pending, blocked, and ignored signals from
3165 /proc/pid/status, and fill in PENDING, BLOCKED, and IGNORED. */
3166
3167 void
3168 linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3169 {
3170 FILE *procfile;
3171 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3172 int signum;
3173
3174 sigemptyset (pending);
3175 sigemptyset (blocked);
3176 sigemptyset (ignored);
3177 sprintf (fname, "/proc/%d/status", pid);
3178 procfile = fopen (fname, "r");
3179 if (procfile == NULL)
3180 error (_("Could not open %s"), fname);
3181
3182 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3183 {
3184 /* Normal queued signals are on the SigPnd line in the status
3185 file. However, 2.6 kernels also have a "shared" pending
3186 queue for delivering signals to a thread group, so check for
3187 a ShdPnd line also.
3188
3189 Unfortunately some Red Hat kernels include the shared pending
3190 queue but not the ShdPnd status field. */
3191
3192 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3193 add_line_to_sigset (buffer + 8, pending);
3194 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3195 add_line_to_sigset (buffer + 8, pending);
3196 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3197 add_line_to_sigset (buffer + 8, blocked);
3198 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3199 add_line_to_sigset (buffer + 8, ignored);
3200 }
3201
3202 fclose (procfile);
3203 }
3204
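/* Typical use of the helper above, e.g. to see whether a SIGINT has
   been queued for the inferior but not yet delivered (sketch; the
   wrapper function is hypothetical):  */
#if 0   /* Illustrative sketch only.  */
static int
inferior_has_pending_sigint (void)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (PIDGET (inferior_ptid),
                              &pending, &blocked, &ignored);
  return sigismember (&pending, SIGINT);
}
#endif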
3205 static LONGEST
3206 linux_xfer_partial (struct target_ops *ops, enum target_object object,
3207 const char *annex, gdb_byte *readbuf,
3208 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3209 {
3210 LONGEST xfer;
3211
3212 if (object == TARGET_OBJECT_AUXV)
3213 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3214 offset, len);
3215
3216 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3217 offset, len);
3218 if (xfer != 0)
3219 return xfer;
3220
3221 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3222 offset, len);
3223 }
3224
3225 #ifndef FETCH_INFERIOR_REGISTERS
3226
3227 /* Return the address in the core dump or inferior of register
3228 REGNO. */
3229
3230 static CORE_ADDR
3231 linux_register_u_offset (int regno)
3232 {
3233 /* FIXME drow/2005-09-04: The hardcoded use of register_addr should go
3234 away. This requires disentangling the various definitions of it
3235 (particularly alpha-nat.c's). */
3236 return register_addr (regno, 0);
3237 }
3238
3239 #endif
3240
3241 /* Create a prototype generic Linux target. The client can override
3242 it with local methods. */
3243
3244 struct target_ops *
3245 linux_target (void)
3246 {
3247 struct target_ops *t;
3248
3249 #ifdef FETCH_INFERIOR_REGISTERS
3250 t = inf_ptrace_target ();
3251 #else
3252 t = inf_ptrace_trad_target (linux_register_u_offset);
3253 #endif
3254 t->to_wait = child_wait;
3255 t->to_kill = kill_inferior;
3256 t->to_insert_fork_catchpoint = child_insert_fork_catchpoint;
3257 t->to_insert_vfork_catchpoint = child_insert_vfork_catchpoint;
3258 t->to_insert_exec_catchpoint = child_insert_exec_catchpoint;
3259 t->to_pid_to_exec_file = child_pid_to_exec_file;
3260 t->to_post_startup_inferior = linux_child_post_startup_inferior;
3261 t->to_post_attach = child_post_attach;
3262 t->to_follow_fork = child_follow_fork;
3263 t->to_find_memory_regions = linux_nat_find_memory_regions;
3264 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
3265
3266 super_xfer_partial = t->to_xfer_partial;
3267 t->to_xfer_partial = linux_xfer_partial;
3268
3269 super_mourn_inferior = t->to_mourn_inferior;
3270 t->to_mourn_inferior = child_mourn_inferior;
3271
3272 linux_ops = t;
3273 return t;
3274 }
3275
3276 void
3277 _initialize_linux_nat (void)
3278 {
3279 struct sigaction action;
3280 extern void thread_db_init (struct target_ops *);
3281
3282 add_info ("proc", linux_nat_info_proc_cmd, _("\
3283 Show /proc process information about any running process.\n\
3284 Specify any process id, or use the program being debugged by default.\n\
3285 Specify any of the following keywords for detailed info:\n\
3286 mappings -- list of mapped memory regions.\n\
3287 stat -- list information from /proc/PID/stat.\n\
3288 status -- list information from /proc/PID/status.\n\
3289 all -- list all available /proc info."));
3290
3291 init_linux_nat_ops ();
3292 add_target (&linux_nat_ops);
3293 thread_db_init (&linux_nat_ops);
3294
3295 /* Save the original signal mask. */
3296 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
3297
3298 action.sa_handler = sigchld_handler;
3299 sigemptyset (&action.sa_mask);
3300 action.sa_flags = SA_RESTART;
3301 sigaction (SIGCHLD, &action, NULL);
3302
3303 /* Make sure we don't block SIGCHLD during a sigsuspend. */
3304 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
3305 sigdelset (&suspend_mask, SIGCHLD);
3306
3307 sigemptyset (&blocked_mask);
3308
3309 add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\
3310 Set debugging of GNU/Linux lwp module."), _("\
3311 Show debugging of GNU/Linux lwp module."), _("\
3312 Enables printf debugging output."),
3313 NULL,
3314 show_debug_linux_nat,
3315 &setdebuglist, &showdebuglist);
3316 }
3317 \f
3318
3319 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
3320 the GNU/Linux Threads library and therefore doesn't really belong
3321 here. */
3322
3323 /* Read variable NAME in the target and return its value if found.
3324 Otherwise return zero. It is assumed that the type of the variable
3325 is `int'. */
3326
3327 static int
3328 get_signo (const char *name)
3329 {
3330 struct minimal_symbol *ms;
3331 int signo;
3332
3333 ms = lookup_minimal_symbol (name, NULL, NULL);
3334 if (ms == NULL)
3335 return 0;
3336
3337 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
3338 sizeof (signo)) != 0)
3339 return 0;
3340
3341 return signo;
3342 }
3343
3344 /* Return the set of signals used by the threads library in *SET. */
3345
3346 void
3347 lin_thread_get_thread_signals (sigset_t *set)
3348 {
3349 struct sigaction action;
3350 int restart, cancel;
3351
3352 sigemptyset (set);
3353
3354 restart = get_signo ("__pthread_sig_restart");
3355 if (restart == 0)
3356 return;
3357
3358 cancel = get_signo ("__pthread_sig_cancel");
3359 if (cancel == 0)
3360 return;
3361
3362 sigaddset (set, restart);
3363 sigaddset (set, cancel);
3364
3365 /* The GNU/Linux Threads library makes terminating threads send a
3366 special "cancel" signal instead of SIGCHLD. Make sure we catch
3367 those (to prevent them from terminating GDB itself, which is
3368 likely to be their default action) and treat them the same way as
3369 SIGCHLD. */
3370
3371 action.sa_handler = sigchld_handler;
3372 sigemptyset (&action.sa_mask);
3373 action.sa_flags = SA_RESTART;
3374 sigaction (cancel, &action, NULL);
3375
3376 /* We block the "cancel" signal throughout this code ... */
3377 sigaddset (&blocked_mask, cancel);
3378 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
3379
3380 /* ... except during a sigsuspend. */
3381 sigdelset (&suspend_mask, cancel);
3382 }
3383