/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
21 #include "linux-low.h"
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
28 #include <sys/ioctl.h>
34 #include <sys/syscall.h>
38 #include <sys/types.h>
43 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
44 then ELFMAG0 will have been defined. If it didn't get included by
45 gdb_proc_service.h then including it will likely introduce a duplicate
46 definition of elf_fpregset_t. */
51 #define SPUFS_MAGIC 0x23c9b64e
54 #ifndef PTRACE_GETSIGINFO
55 # define PTRACE_GETSIGINFO 0x4202
56 # define PTRACE_SETSIGINFO 0x4203
63 /* If the system headers did not provide the constants, hard-code the normal
65 #ifndef PTRACE_EVENT_FORK
67 #define PTRACE_SETOPTIONS 0x4200
68 #define PTRACE_GETEVENTMSG 0x4201
70 /* options set using PTRACE_SETOPTIONS */
71 #define PTRACE_O_TRACESYSGOOD 0x00000001
72 #define PTRACE_O_TRACEFORK 0x00000002
73 #define PTRACE_O_TRACEVFORK 0x00000004
74 #define PTRACE_O_TRACECLONE 0x00000008
75 #define PTRACE_O_TRACEEXEC 0x00000010
76 #define PTRACE_O_TRACEVFORKDONE 0x00000020
77 #define PTRACE_O_TRACEEXIT 0x00000040
79 /* Wait extended result codes for the above trace options. */
80 #define PTRACE_EVENT_FORK 1
81 #define PTRACE_EVENT_VFORK 2
82 #define PTRACE_EVENT_CLONE 3
83 #define PTRACE_EVENT_EXEC 4
84 #define PTRACE_EVENT_VFORK_DONE 5
85 #define PTRACE_EVENT_EXIT 6
87 #endif /* PTRACE_EVENT_FORK */
89 /* We can't always assume that this flag is available, but all systems
90 with the ptrace event handlers also have __WALL, so it's safe to use
93 #define __WALL 0x40000000 /* Wait for any child. */
97 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
101 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
106 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
107 representation of the thread ID.
109 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
110 the same as the LWP ID.
112 ``all_processes'' is keyed by the "overall process ID", which
113 GNU/Linux calls tgid, "thread group ID". */
115 struct inferior_list all_lwps
;
117 /* A list of all unknown processes which receive stop signals. Some other
118 process will presumably claim each of these as forked children
121 struct inferior_list stopped_pids
;
123 /* FIXME this is a bit of a hack, and could be removed. */
124 int stopping_threads
;
126 /* FIXME make into a target method? */
127 int using_threads
= 1;
129 /* This flag is true iff we've just created or attached to our first
130 inferior but it has not stopped yet. As soon as it does, we need
131 to call the low target's arch_setup callback. Doing this only on
132 the first inferior avoids reinializing the architecture on every
133 inferior, and avoids messing with the register caches of the
134 already running inferiors. NOTE: this assumes all inferiors under
135 control of gdbserver have the same architecture. */
136 static int new_inferior
;
138 static void linux_resume_one_lwp (struct lwp_info
*lwp
,
139 int step
, int signal
, siginfo_t
*info
);
140 static void linux_resume (struct thread_resume
*resume_info
, size_t n
);
141 static void stop_all_lwps (void);
142 static int linux_wait_for_event (ptid_t ptid
, int *wstat
, int options
);
143 static void *add_lwp (ptid_t ptid
);
144 static int linux_stopped_by_watchpoint (void);
145 static void mark_lwp_dead (struct lwp_info
*lwp
, int wstat
);
146 static int linux_core_of_thread (ptid_t ptid
);
147 static void proceed_all_lwps (void);
148 static void unstop_all_lwps (struct lwp_info
*except
);
149 static int finish_step_over (struct lwp_info
*lwp
);
150 static CORE_ADDR
get_stop_pc (struct lwp_info
*lwp
);
151 static int kill_lwp (unsigned long lwpid
, int signo
);
153 /* True if the low target can hardware single-step. Such targets
154 don't need a BREAKPOINT_REINSERT_ADDR callback. */
157 can_hardware_single_step (void)
159 return (the_low_target
.breakpoint_reinsert_addr
== NULL
);
162 /* True if the low target supports memory breakpoints. If so, we'll
163 have a GET_PC implementation. */
166 supports_breakpoints (void)
168 return (the_low_target
.get_pc
!= NULL
);
171 struct pending_signals
175 struct pending_signals
*prev
;
178 #define PTRACE_ARG3_TYPE void *
179 #define PTRACE_ARG4_TYPE void *
180 #define PTRACE_XFER_TYPE long
182 #ifdef HAVE_LINUX_REGSETS
183 static char *disabled_regsets
;
184 static int num_regsets
;
187 /* The read/write ends of the pipe registered as waitable file in the
189 static int linux_event_pipe
[2] = { -1, -1 };
191 /* True if we're currently in async mode. */
192 #define target_is_async_p() (linux_event_pipe[0] != -1)
194 static void send_sigstop (struct inferior_list_entry
*entry
);
195 static void wait_for_sigstop (struct inferior_list_entry
*entry
);
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

static char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);

  /* Read at most MAXPATHLEN - 1 bytes: readlink does not NUL-terminate
     its output, so a link of exactly MAXPATHLEN bytes would otherwise
     fill the buffer completely and leave NAME2 unterminated.  The
     buffer was zeroed above, so capping the read guarantees a
     terminator.  */
  if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
    {
      /* The symlink resolved; return the target path.  */
      free (name1);
      return name2;
    }
  else
    {
      /* readlink failed; fall back to returning the /proc path
         itself, as the original code did.  */
      free (name2);
      return name1;
    }
}
/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  const unsigned char *ident = header->e_ident;

  /* First check the ELF magic bytes; anything else is not an ELF
     object at all.  */
  if (ident[EI_MAG0] != ELFMAG0
      || ident[EI_MAG1] != ELFMAG1
      || ident[EI_MAG2] != ELFMAG2
      || ident[EI_MAG3] != ELFMAG3)
    return 0;

  /* It is ELF; report whether the class byte says 64-bit.  */
  return ident[EI_CLASS] == ELFCLASS64;
}
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* A file too short to hold an ELF header cannot be a 64-bit ELF
     object; treat a short read the same as a non-ELF file.  */
  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }

  close (fd);
  return elf_64_header_p (&header);
}
260 delete_lwp (struct lwp_info
*lwp
)
262 remove_thread (get_lwp_thread (lwp
));
263 remove_inferior (&all_lwps
, &lwp
->head
);
264 free (lwp
->arch_private
);
268 /* Add a process to the common process list, and set its private
271 static struct process_info
*
272 linux_add_process (int pid
, int attached
)
274 struct process_info
*proc
;
276 /* Is this the first process? If so, then set the arch. */
277 if (all_processes
.head
== NULL
)
280 proc
= add_process (pid
, attached
);
281 proc
->private = xcalloc (1, sizeof (*proc
->private));
283 if (the_low_target
.new_process
!= NULL
)
284 proc
->private->arch_private
= the_low_target
.new_process ();
289 /* Remove a process from the common process list,
290 also freeing all private data. */
293 linux_remove_process (struct process_info
*process
)
295 struct process_info_private
*priv
= process
->private;
297 free (priv
->arch_private
);
299 remove_process (process
);
302 /* Wrapper function for waitpid which handles EINTR, and emulates
303 __WALL for systems where that is not available. */
306 my_waitpid (int pid
, int *status
, int flags
)
311 fprintf (stderr
, "my_waitpid (%d, 0x%x)\n", pid
, flags
);
315 sigset_t block_mask
, org_mask
, wake_mask
;
318 wnohang
= (flags
& WNOHANG
) != 0;
319 flags
&= ~(__WALL
| __WCLONE
);
322 /* Block all signals while here. This avoids knowing about
323 LinuxThread's signals. */
324 sigfillset (&block_mask
);
325 sigprocmask (SIG_BLOCK
, &block_mask
, &org_mask
);
327 /* ... except during the sigsuspend below. */
328 sigemptyset (&wake_mask
);
332 /* Since all signals are blocked, there's no need to check
334 ret
= waitpid (pid
, status
, flags
);
337 if (ret
== -1 && out_errno
!= ECHILD
)
342 if (flags
& __WCLONE
)
344 /* We've tried both flavors now. If WNOHANG is set,
345 there's nothing else to do, just bail out. */
350 fprintf (stderr
, "blocking\n");
352 /* Block waiting for signals. */
353 sigsuspend (&wake_mask
);
359 sigprocmask (SIG_SETMASK
, &org_mask
, NULL
);
364 ret
= waitpid (pid
, status
, flags
);
365 while (ret
== -1 && errno
== EINTR
);
370 fprintf (stderr
, "my_waitpid (%d, 0x%x): status(%x), %d\n",
371 pid
, flags
, status
? *status
: -1, ret
);
377 /* Handle a GNU/Linux extended wait response. If we see a clone
378 event, we need to add the new LWP to our list (and not report the
379 trap to higher layers). */
382 handle_extended_wait (struct lwp_info
*event_child
, int wstat
)
384 int event
= wstat
>> 16;
385 struct lwp_info
*new_lwp
;
387 if (event
== PTRACE_EVENT_CLONE
)
390 unsigned long new_pid
;
391 int ret
, status
= W_STOPCODE (SIGSTOP
);
393 ptrace (PTRACE_GETEVENTMSG
, lwpid_of (event_child
), 0, &new_pid
);
395 /* If we haven't already seen the new PID stop, wait for it now. */
396 if (! pull_pid_from_list (&stopped_pids
, new_pid
))
398 /* The new child has a pending SIGSTOP. We can't affect it until it
399 hits the SIGSTOP, but we're already attached. */
401 ret
= my_waitpid (new_pid
, &status
, __WALL
);
404 perror_with_name ("waiting for new child");
405 else if (ret
!= new_pid
)
406 warning ("wait returned unexpected PID %d", ret
);
407 else if (!WIFSTOPPED (status
))
408 warning ("wait returned unexpected status 0x%x", status
);
411 ptrace (PTRACE_SETOPTIONS
, new_pid
, 0, (PTRACE_ARG4_TYPE
) PTRACE_O_TRACECLONE
);
413 ptid
= ptid_build (pid_of (event_child
), new_pid
, 0);
414 new_lwp
= (struct lwp_info
*) add_lwp (ptid
);
415 add_thread (ptid
, new_lwp
);
417 /* Either we're going to immediately resume the new thread
418 or leave it stopped. linux_resume_one_lwp is a nop if it
419 thinks the thread is currently running, so set this first
420 before calling linux_resume_one_lwp. */
421 new_lwp
->stopped
= 1;
423 /* Normally we will get the pending SIGSTOP. But in some cases
424 we might get another signal delivered to the group first.
425 If we do get another signal, be sure not to lose it. */
426 if (WSTOPSIG (status
) == SIGSTOP
)
428 if (stopping_threads
)
429 new_lwp
->stop_pc
= get_stop_pc (new_lwp
);
431 linux_resume_one_lwp (new_lwp
, 0, 0, NULL
);
435 new_lwp
->stop_expected
= 1;
437 if (stopping_threads
)
439 new_lwp
->stop_pc
= get_stop_pc (new_lwp
);
440 new_lwp
->status_pending_p
= 1;
441 new_lwp
->status_pending
= status
;
444 /* Pass the signal on. This is what GDB does - except
445 shouldn't we really report it instead? */
446 linux_resume_one_lwp (new_lwp
, 0, WSTOPSIG (status
), NULL
);
449 /* Always resume the current thread. If we are stopping
450 threads, it will have a pending SIGSTOP; we may as well
452 linux_resume_one_lwp (event_child
, event_child
->stepping
, 0, NULL
);
456 /* Return the PC as read from the regcache of LWP, without any
460 get_pc (struct lwp_info
*lwp
)
462 struct thread_info
*saved_inferior
;
463 struct regcache
*regcache
;
466 if (the_low_target
.get_pc
== NULL
)
469 saved_inferior
= current_inferior
;
470 current_inferior
= get_lwp_thread (lwp
);
472 regcache
= get_thread_regcache (current_inferior
, 1);
473 pc
= (*the_low_target
.get_pc
) (regcache
);
476 fprintf (stderr
, "pc is 0x%lx\n", (long) pc
);
478 current_inferior
= saved_inferior
;
482 /* This function should only be called if LWP got a SIGTRAP.
483 The SIGTRAP could mean several things.
485 On i386, where decr_pc_after_break is non-zero:
486 If we were single-stepping this process using PTRACE_SINGLESTEP,
487 we will get only the one SIGTRAP (even if the instruction we
488 stepped over was a breakpoint). The value of $eip will be the
490 If we continue the process using PTRACE_CONT, we will get a
491 SIGTRAP when we hit a breakpoint. The value of $eip will be
492 the instruction after the breakpoint (i.e. needs to be
493 decremented). If we report the SIGTRAP to GDB, we must also
494 report the undecremented PC. If we cancel the SIGTRAP, we
495 must resume at the decremented PC.
497 (Presumably, not yet tested) On a non-decr_pc_after_break machine
498 with hardware or kernel single-step:
499 If we single-step over a breakpoint instruction, our PC will
500 point at the following instruction. If we continue and hit a
501 breakpoint instruction, our PC will point at the breakpoint
505 get_stop_pc (struct lwp_info
*lwp
)
509 if (the_low_target
.get_pc
== NULL
)
512 stop_pc
= get_pc (lwp
);
514 if (WSTOPSIG (lwp
->last_status
) == SIGTRAP
516 && !lwp
->stopped_by_watchpoint
517 && lwp
->last_status
>> 16 == 0)
518 stop_pc
-= the_low_target
.decr_pc_after_break
;
521 fprintf (stderr
, "stop pc is 0x%lx\n", (long) stop_pc
);
527 add_lwp (ptid_t ptid
)
529 struct lwp_info
*lwp
;
531 lwp
= (struct lwp_info
*) xmalloc (sizeof (*lwp
));
532 memset (lwp
, 0, sizeof (*lwp
));
536 lwp
->last_resume_kind
= resume_continue
;
538 if (the_low_target
.new_thread
!= NULL
)
539 lwp
->arch_private
= the_low_target
.new_thread ();
541 add_inferior_to_list (&all_lwps
, &lwp
->head
);
546 /* Start an inferior process and returns its pid.
547 ALLARGS is a vector of program-name and args. */
550 linux_create_inferior (char *program
, char **allargs
)
552 struct lwp_info
*new_lwp
;
556 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
562 perror_with_name ("fork");
566 ptrace (PTRACE_TRACEME
, 0, 0, 0);
568 #ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
569 signal (__SIGRTMIN
+ 1, SIG_DFL
);
574 execv (program
, allargs
);
576 execvp (program
, allargs
);
578 fprintf (stderr
, "Cannot exec %s: %s.\n", program
,
584 linux_add_process (pid
, 0);
586 ptid
= ptid_build (pid
, pid
, 0);
587 new_lwp
= add_lwp (ptid
);
588 add_thread (ptid
, new_lwp
);
589 new_lwp
->must_set_ptrace_flags
= 1;
594 /* Attach to an inferior process. */
597 linux_attach_lwp_1 (unsigned long lwpid
, int initial
)
600 struct lwp_info
*new_lwp
;
602 if (ptrace (PTRACE_ATTACH
, lwpid
, 0, 0) != 0)
606 /* If we fail to attach to an LWP, just warn. */
607 fprintf (stderr
, "Cannot attach to lwp %ld: %s (%d)\n", lwpid
,
608 strerror (errno
), errno
);
613 /* If we fail to attach to a process, report an error. */
614 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid
,
615 strerror (errno
), errno
);
619 /* NOTE/FIXME: This lwp might have not been the tgid. */
620 ptid
= ptid_build (lwpid
, lwpid
, 0);
623 /* Note that extracting the pid from the current inferior is
624 safe, since we're always called in the context of the same
625 process as this new thread. */
626 int pid
= pid_of (get_thread_lwp (current_inferior
));
627 ptid
= ptid_build (pid
, lwpid
, 0);
630 new_lwp
= (struct lwp_info
*) add_lwp (ptid
);
631 add_thread (ptid
, new_lwp
);
633 /* We need to wait for SIGSTOP before being able to make the next
634 ptrace call on this LWP. */
635 new_lwp
->must_set_ptrace_flags
= 1;
637 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
640 There are several cases to consider here:
642 1) gdbserver has already attached to the process and is being notified
643 of a new thread that is being created.
644 In this case we should ignore that SIGSTOP and resume the
645 process. This is handled below by setting stop_expected = 1,
646 and the fact that add_lwp sets last_resume_kind ==
649 2) This is the first thread (the process thread), and we're attaching
650 to it via attach_inferior.
651 In this case we want the process thread to stop.
652 This is handled by having linux_attach set last_resume_kind ==
653 resume_stop after we return.
654 ??? If the process already has several threads we leave the other
657 3) GDB is connecting to gdbserver and is requesting an enumeration of all
659 In this case we want the thread to stop.
660 FIXME: This case is currently not properly handled.
661 We should wait for the SIGSTOP but don't. Things work apparently
662 because enough time passes between when we ptrace (ATTACH) and when
663 gdb makes the next ptrace call on the thread.
665 On the other hand, if we are currently trying to stop all threads, we
666 should treat the new thread as if we had sent it a SIGSTOP. This works
667 because we are guaranteed that the add_lwp call above added us to the
668 end of the list, and so the new thread has not yet reached
669 wait_for_sigstop (but will). */
670 new_lwp
->stop_expected
= 1;
/* Attach to an already-running LWP that is not the thread group
   leader; thin wrapper over linux_attach_lwp_1 with INITIAL == 0.  */

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
680 linux_attach (unsigned long pid
)
682 struct lwp_info
*lwp
;
684 linux_attach_lwp_1 (pid
, 1);
686 linux_add_process (pid
, 1);
690 /* Don't ignore the initial SIGSTOP if we just attached to this
691 process. It will be collected by wait shortly. */
692 lwp
= (struct lwp_info
*) find_inferior_id (&all_lwps
,
693 ptid_build (pid
, pid
, 0));
694 lwp
->last_resume_kind
= resume_stop
;
707 second_thread_of_pid_p (struct inferior_list_entry
*entry
, void *args
)
709 struct counter
*counter
= args
;
711 if (ptid_get_pid (entry
->id
) == counter
->pid
)
713 if (++counter
->count
> 1)
721 last_thread_of_process_p (struct thread_info
*thread
)
723 ptid_t ptid
= ((struct inferior_list_entry
*)thread
)->id
;
724 int pid
= ptid_get_pid (ptid
);
725 struct counter counter
= { pid
, 0 };
727 return (find_inferior (&all_threads
,
728 second_thread_of_pid_p
, &counter
) == NULL
);
731 /* Kill the inferior lwp. */
734 linux_kill_one_lwp (struct inferior_list_entry
*entry
, void *args
)
736 struct thread_info
*thread
= (struct thread_info
*) entry
;
737 struct lwp_info
*lwp
= get_thread_lwp (thread
);
739 int pid
= * (int *) args
;
741 if (ptid_get_pid (entry
->id
) != pid
)
744 /* We avoid killing the first thread here, because of a Linux kernel (at
745 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
746 the children get a chance to be reaped, it will remain a zombie
749 if (lwpid_of (lwp
) == pid
)
752 fprintf (stderr
, "lkop: is last of process %s\n",
753 target_pid_to_str (entry
->id
));
757 /* If we're killing a running inferior, make sure it is stopped
758 first, as PTRACE_KILL will not work otherwise. */
760 send_sigstop (&lwp
->head
);
764 ptrace (PTRACE_KILL
, lwpid_of (lwp
), 0, 0);
766 /* Make sure it died. The loop is most likely unnecessary. */
767 pid
= linux_wait_for_event (lwp
->head
.id
, &wstat
, __WALL
);
768 } while (pid
> 0 && WIFSTOPPED (wstat
));
776 struct process_info
*process
;
777 struct lwp_info
*lwp
;
778 struct thread_info
*thread
;
782 process
= find_process_pid (pid
);
786 find_inferior (&all_threads
, linux_kill_one_lwp
, &pid
);
788 /* See the comment in linux_kill_one_lwp. We did not kill the first
789 thread in the list, so do so now. */
790 lwp
= find_lwp_pid (pid_to_ptid (pid
));
791 thread
= get_lwp_thread (lwp
);
794 fprintf (stderr
, "lk_1: killing lwp %ld, for pid: %d\n",
795 lwpid_of (lwp
), pid
);
797 /* If we're killing a running inferior, make sure it is stopped
798 first, as PTRACE_KILL will not work otherwise. */
800 send_sigstop (&lwp
->head
);
804 ptrace (PTRACE_KILL
, lwpid_of (lwp
), 0, 0);
806 /* Make sure it died. The loop is most likely unnecessary. */
807 lwpid
= linux_wait_for_event (lwp
->head
.id
, &wstat
, __WALL
);
808 } while (lwpid
> 0 && WIFSTOPPED (wstat
));
811 thread_db_free (process
, 0);
814 linux_remove_process (process
);
819 linux_detach_one_lwp (struct inferior_list_entry
*entry
, void *args
)
821 struct thread_info
*thread
= (struct thread_info
*) entry
;
822 struct lwp_info
*lwp
= get_thread_lwp (thread
);
823 int pid
= * (int *) args
;
825 if (ptid_get_pid (entry
->id
) != pid
)
828 /* If we're detaching from a running inferior, make sure it is
829 stopped first, as PTRACE_DETACH will not work otherwise. */
832 int lwpid
= lwpid_of (lwp
);
834 stopping_threads
= 1;
835 send_sigstop (&lwp
->head
);
837 /* If this detects a new thread through a clone event, the new
838 thread is appended to the end of the lwp list, so we'll
839 eventually detach from it. */
840 wait_for_sigstop (&lwp
->head
);
841 stopping_threads
= 0;
843 /* If LWP exits while we're trying to stop it, there's nothing
845 lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
850 /* If this process is stopped but is expecting a SIGSTOP, then make
851 sure we take care of that now. This isn't absolutely guaranteed
852 to collect the SIGSTOP, but is fairly likely to. */
853 if (lwp
->stop_expected
)
856 /* Clear stop_expected, so that the SIGSTOP will be reported. */
857 lwp
->stop_expected
= 0;
859 linux_resume_one_lwp (lwp
, 0, 0, NULL
);
860 linux_wait_for_event (lwp
->head
.id
, &wstat
, __WALL
);
863 /* Flush any pending changes to the process's registers. */
864 regcache_invalidate_one ((struct inferior_list_entry
*)
865 get_lwp_thread (lwp
));
867 /* Finally, let it resume. */
868 ptrace (PTRACE_DETACH
, lwpid_of (lwp
), 0, 0);
875 any_thread_of (struct inferior_list_entry
*entry
, void *args
)
879 if (ptid_get_pid (entry
->id
) == *pid_p
)
886 linux_detach (int pid
)
888 struct process_info
*process
;
890 process
= find_process_pid (pid
);
895 thread_db_free (process
, 1);
899 (struct thread_info
*) find_inferior (&all_threads
, any_thread_of
, &pid
);
901 delete_all_breakpoints ();
902 find_inferior (&all_threads
, linux_detach_one_lwp
, &pid
);
903 linux_remove_process (process
);
911 struct process_info
*process
;
913 process
= find_process_pid (pid
);
918 ret
= my_waitpid (pid
, &status
, 0);
919 if (WIFEXITED (status
) || WIFSIGNALED (status
))
921 } while (ret
!= -1 || errno
!= ECHILD
);
924 /* Return nonzero if the given thread is still alive. */
926 linux_thread_alive (ptid_t ptid
)
928 struct lwp_info
*lwp
= find_lwp_pid (ptid
);
930 /* We assume we always know if a thread exits. If a whole process
931 exited but we still haven't been able to report it to GDB, we'll
932 hold on to the last lwp of the dead process. */
939 /* Return 1 if this lwp has an interesting status pending. */
941 status_pending_p_callback (struct inferior_list_entry
*entry
, void *arg
)
943 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
944 ptid_t ptid
= * (ptid_t
*) arg
;
945 struct thread_info
*thread
= get_lwp_thread (lwp
);
947 /* Check if we're only interested in events from a specific process
949 if (!ptid_equal (minus_one_ptid
, ptid
)
950 && ptid_get_pid (ptid
) != ptid_get_pid (lwp
->head
.id
))
953 thread
= get_lwp_thread (lwp
);
955 /* If we got a `vCont;t', but we haven't reported a stop yet, do
956 report any status pending the LWP may have. */
957 if (lwp
->last_resume_kind
== resume_stop
958 && thread
->last_status
.kind
== TARGET_WAITKIND_STOPPED
)
961 return lwp
->status_pending_p
;
965 same_lwp (struct inferior_list_entry
*entry
, void *data
)
967 ptid_t ptid
= *(ptid_t
*) data
;
970 if (ptid_get_lwp (ptid
) != 0)
971 lwp
= ptid_get_lwp (ptid
);
973 lwp
= ptid_get_pid (ptid
);
975 if (ptid_get_lwp (entry
->id
) == lwp
)
982 find_lwp_pid (ptid_t ptid
)
984 return (struct lwp_info
*) find_inferior (&all_lwps
, same_lwp
, &ptid
);
987 static struct lwp_info
*
988 linux_wait_for_lwp (ptid_t ptid
, int *wstatp
, int options
)
991 int to_wait_for
= -1;
992 struct lwp_info
*child
= NULL
;
995 fprintf (stderr
, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid
));
997 if (ptid_equal (ptid
, minus_one_ptid
))
998 to_wait_for
= -1; /* any child */
1000 to_wait_for
= ptid_get_lwp (ptid
); /* this lwp only */
1006 ret
= my_waitpid (to_wait_for
, wstatp
, options
);
1007 if (ret
== 0 || (ret
== -1 && errno
== ECHILD
&& (options
& WNOHANG
)))
1010 perror_with_name ("waitpid");
1013 && (!WIFSTOPPED (*wstatp
)
1014 || (WSTOPSIG (*wstatp
) != 32
1015 && WSTOPSIG (*wstatp
) != 33)))
1016 fprintf (stderr
, "Got an event from %d (%x)\n", ret
, *wstatp
);
1018 child
= find_lwp_pid (pid_to_ptid (ret
));
1020 /* If we didn't find a process, one of two things presumably happened:
1021 - A process we started and then detached from has exited. Ignore it.
1022 - A process we are controlling has forked and the new child's stop
1023 was reported to us by the kernel. Save its PID. */
1024 if (child
== NULL
&& WIFSTOPPED (*wstatp
))
1026 add_pid_to_list (&stopped_pids
, ret
);
1029 else if (child
== NULL
)
1034 child
->last_status
= *wstatp
;
1036 /* Architecture-specific setup after inferior is running.
1037 This needs to happen after we have attached to the inferior
1038 and it is stopped for the first time, but before we access
1039 any inferior registers. */
1042 the_low_target
.arch_setup ();
1043 #ifdef HAVE_LINUX_REGSETS
1044 memset (disabled_regsets
, 0, num_regsets
);
1049 /* Fetch the possibly triggered data watchpoint info and store it in
1052 On some archs, like x86, that use debug registers to set
1053 watchpoints, it's possible that the way to know which watched
1054 address trapped, is to check the register that is used to select
1055 which address to watch. Problem is, between setting the
1056 watchpoint and reading back which data address trapped, the user
1057 may change the set of watchpoints, and, as a consequence, GDB
1058 changes the debug registers in the inferior. To avoid reading
1059 back a stale stopped-data-address when that happens, we cache in
1060 LP the fact that a watchpoint trapped, and the corresponding data
1061 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1062 changes the debug registers meanwhile, we have the cached data we
1065 if (WIFSTOPPED (*wstatp
) && WSTOPSIG (*wstatp
) == SIGTRAP
)
1067 if (the_low_target
.stopped_by_watchpoint
== NULL
)
1069 child
->stopped_by_watchpoint
= 0;
1073 struct thread_info
*saved_inferior
;
1075 saved_inferior
= current_inferior
;
1076 current_inferior
= get_lwp_thread (child
);
1078 child
->stopped_by_watchpoint
1079 = the_low_target
.stopped_by_watchpoint ();
1081 if (child
->stopped_by_watchpoint
)
1083 if (the_low_target
.stopped_data_address
!= NULL
)
1084 child
->stopped_data_address
1085 = the_low_target
.stopped_data_address ();
1087 child
->stopped_data_address
= 0;
1090 current_inferior
= saved_inferior
;
1094 /* Store the STOP_PC, with adjustment applied. This depends on the
1095 architecture being defined already (so that CHILD has a valid
1096 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1098 if (WIFSTOPPED (*wstatp
))
1099 child
->stop_pc
= get_stop_pc (child
);
1102 && WIFSTOPPED (*wstatp
)
1103 && the_low_target
.get_pc
!= NULL
)
1105 struct thread_info
*saved_inferior
= current_inferior
;
1106 struct regcache
*regcache
;
1109 current_inferior
= get_lwp_thread (child
);
1110 regcache
= get_thread_regcache (current_inferior
, 1);
1111 pc
= (*the_low_target
.get_pc
) (regcache
);
1112 fprintf (stderr
, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc
);
1113 current_inferior
= saved_inferior
;
1119 /* Arrange for a breakpoint to be hit again later. We don't keep the
1120 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1121 will handle the current event, eventually we will resume this LWP,
1122 and this breakpoint will trap again. */
1125 cancel_breakpoint (struct lwp_info
*lwp
)
1127 struct thread_info
*saved_inferior
;
1128 struct regcache
*regcache
;
1130 /* There's nothing to do if we don't support breakpoints. */
1131 if (!supports_breakpoints ())
1134 regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
1136 /* breakpoint_at reads from current inferior. */
1137 saved_inferior
= current_inferior
;
1138 current_inferior
= get_lwp_thread (lwp
);
1140 if ((*the_low_target
.breakpoint_at
) (lwp
->stop_pc
))
1144 "CB: Push back breakpoint for %s\n",
1145 target_pid_to_str (lwp
->head
.id
));
1147 /* Back up the PC if necessary. */
1148 if (the_low_target
.decr_pc_after_break
)
1150 struct regcache
*regcache
1151 = get_thread_regcache (get_lwp_thread (lwp
), 1);
1152 (*the_low_target
.set_pc
) (regcache
, lwp
->stop_pc
);
1155 current_inferior
= saved_inferior
;
1162 "CB: No breakpoint found at %s for [%s]\n",
1163 paddress (lwp
->stop_pc
),
1164 target_pid_to_str (lwp
->head
.id
));
1167 current_inferior
= saved_inferior
;
1171 /* When the event-loop is doing a step-over, this points at the thread
1173 ptid_t step_over_bkpt
;
1175 /* Wait for an event from child PID. If PID is -1, wait for any
1176 child. Store the stop status through the status pointer WSTAT.
1177 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1178 event was found and OPTIONS contains WNOHANG. Return the PID of
1179 the stopped child otherwise. */
1182 linux_wait_for_event_1 (ptid_t ptid
, int *wstat
, int options
)
1184 struct lwp_info
*event_child
, *requested_child
;
1187 requested_child
= NULL
;
1189 /* Check for a lwp with a pending status. */
1191 if (ptid_equal (ptid
, minus_one_ptid
)
1192 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid
)), ptid
))
1194 event_child
= (struct lwp_info
*)
1195 find_inferior (&all_lwps
, status_pending_p_callback
, &ptid
);
1196 if (debug_threads
&& event_child
)
1197 fprintf (stderr
, "Got a pending child %ld\n", lwpid_of (event_child
));
1201 requested_child
= find_lwp_pid (ptid
);
1203 if (requested_child
->status_pending_p
)
1204 event_child
= requested_child
;
1207 if (event_child
!= NULL
)
1210 fprintf (stderr
, "Got an event from pending child %ld (%04x)\n",
1211 lwpid_of (event_child
), event_child
->status_pending
);
1212 *wstat
= event_child
->status_pending
;
1213 event_child
->status_pending_p
= 0;
1214 event_child
->status_pending
= 0;
1215 current_inferior
= get_lwp_thread (event_child
);
1216 return lwpid_of (event_child
);
1219 /* We only enter this loop if no process has a pending wait status. Thus
1220 any action taken in response to a wait status inside this loop is
1221 responding as soon as we detect the status, not after any pending
1225 event_child
= linux_wait_for_lwp (ptid
, wstat
, options
);
1227 if ((options
& WNOHANG
) && event_child
== NULL
)
1230 fprintf (stderr
, "WNOHANG set, no event found\n");
1234 if (event_child
== NULL
)
1235 error ("event from unknown child");
1237 current_inferior
= get_lwp_thread (event_child
);
1239 /* Check for thread exit. */
1240 if (! WIFSTOPPED (*wstat
))
1243 fprintf (stderr
, "LWP %ld exiting\n", lwpid_of (event_child
));
1245 /* If the last thread is exiting, just return. */
1246 if (last_thread_of_process_p (current_inferior
))
1249 fprintf (stderr
, "LWP %ld is last lwp of process\n",
1250 lwpid_of (event_child
));
1251 return lwpid_of (event_child
);
1256 current_inferior
= (struct thread_info
*) all_threads
.head
;
1258 fprintf (stderr
, "Current inferior is now %ld\n",
1259 lwpid_of (get_thread_lwp (current_inferior
)));
1263 current_inferior
= NULL
;
1265 fprintf (stderr
, "Current inferior is now <NULL>\n");
1268 /* If we were waiting for this particular child to do something...
1269 well, it did something. */
1270 if (requested_child
!= NULL
)
1272 int lwpid
= lwpid_of (event_child
);
1274 /* Cancel the step-over operation --- the thread that
1275 started it is gone. */
1276 if (finish_step_over (event_child
))
1277 unstop_all_lwps (event_child
);
1278 delete_lwp (event_child
);
1282 delete_lwp (event_child
);
1284 /* Wait for a more interesting event. */
1288 if (event_child
->must_set_ptrace_flags
)
1290 ptrace (PTRACE_SETOPTIONS
, lwpid_of (event_child
),
1291 0, (PTRACE_ARG4_TYPE
) PTRACE_O_TRACECLONE
);
1292 event_child
->must_set_ptrace_flags
= 0;
1295 if (WIFSTOPPED (*wstat
) && WSTOPSIG (*wstat
) == SIGTRAP
1296 && *wstat
>> 16 != 0)
1298 handle_extended_wait (event_child
, *wstat
);
1302 /* If GDB is not interested in this signal, don't stop other
1303 threads, and don't report it to GDB. Just resume the
1304 inferior right away. We do this for threading-related
1305 signals as well as any that GDB specifically requested we
1306 ignore. But never ignore SIGSTOP if we sent it ourselves,
1307 and do not ignore signals when stepping - they may require
1308 special handling to skip the signal handler. */
1309 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
1311 if (WIFSTOPPED (*wstat
)
1312 && !event_child
->stepping
1314 #if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
1315 (current_process ()->private->thread_db
!= NULL
1316 && (WSTOPSIG (*wstat
) == __SIGRTMIN
1317 || WSTOPSIG (*wstat
) == __SIGRTMIN
+ 1))
1320 (pass_signals
[target_signal_from_host (WSTOPSIG (*wstat
))]
1321 && !(WSTOPSIG (*wstat
) == SIGSTOP
1322 && event_child
->stop_expected
))))
1324 siginfo_t info
, *info_p
;
1327 fprintf (stderr
, "Ignored signal %d for LWP %ld.\n",
1328 WSTOPSIG (*wstat
), lwpid_of (event_child
));
1330 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (event_child
), 0, &info
) == 0)
1334 linux_resume_one_lwp (event_child
, event_child
->stepping
,
1335 WSTOPSIG (*wstat
), info_p
);
1339 if (WIFSTOPPED (*wstat
)
1340 && WSTOPSIG (*wstat
) == SIGSTOP
1341 && event_child
->stop_expected
)
1346 fprintf (stderr
, "Expected stop.\n");
1347 event_child
->stop_expected
= 0;
1349 should_stop
= (event_child
->last_resume_kind
== resume_stop
1350 || stopping_threads
);
1354 linux_resume_one_lwp (event_child
,
1355 event_child
->stepping
, 0, NULL
);
1360 return lwpid_of (event_child
);
1368 linux_wait_for_event (ptid_t ptid
, int *wstat
, int options
)
1372 if (ptid_is_pid (ptid
))
1374 /* A request to wait for a specific tgid. This is not possible
1375 with waitpid, so instead, we wait for any child, and leave
1376 children we're not interested in right now with a pending
1377 status to report later. */
1378 wait_ptid
= minus_one_ptid
;
1387 event_pid
= linux_wait_for_event_1 (wait_ptid
, wstat
, options
);
1390 && ptid_is_pid (ptid
) && ptid_get_pid (ptid
) != event_pid
)
1392 struct lwp_info
*event_child
= find_lwp_pid (pid_to_ptid (event_pid
));
1394 if (! WIFSTOPPED (*wstat
))
1395 mark_lwp_dead (event_child
, *wstat
);
1398 event_child
->status_pending_p
= 1;
1399 event_child
->status_pending
= *wstat
;
1408 /* Count the LWP's that have had events. */
1411 count_events_callback (struct inferior_list_entry
*entry
, void *data
)
1413 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
1416 gdb_assert (count
!= NULL
);
1418 /* Count only resumed LWPs that have a SIGTRAP event pending that
1419 should be reported to GDB. */
1420 if (get_lwp_thread (lp
)->last_status
.kind
== TARGET_WAITKIND_IGNORE
1421 && lp
->last_resume_kind
!= resume_stop
1422 && lp
->status_pending_p
1423 && WIFSTOPPED (lp
->status_pending
)
1424 && WSTOPSIG (lp
->status_pending
) == SIGTRAP
1425 && !breakpoint_inserted_here (lp
->stop_pc
))
1431 /* Select the LWP (if any) that is currently being single-stepped. */
1434 select_singlestep_lwp_callback (struct inferior_list_entry
*entry
, void *data
)
1436 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
1438 if (get_lwp_thread (lp
)->last_status
.kind
== TARGET_WAITKIND_IGNORE
1439 && lp
->last_resume_kind
== resume_step
1440 && lp
->status_pending_p
)
1446 /* Select the Nth LWP that has had a SIGTRAP event that should be
1450 select_event_lwp_callback (struct inferior_list_entry
*entry
, void *data
)
1452 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
1453 int *selector
= data
;
1455 gdb_assert (selector
!= NULL
);
1457 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1458 if (lp
->last_resume_kind
!= resume_stop
1459 && get_lwp_thread (lp
)->last_status
.kind
== TARGET_WAITKIND_IGNORE
1460 && lp
->status_pending_p
1461 && WIFSTOPPED (lp
->status_pending
)
1462 && WSTOPSIG (lp
->status_pending
) == SIGTRAP
1463 && !breakpoint_inserted_here (lp
->stop_pc
))
1464 if ((*selector
)-- == 0)
1471 cancel_breakpoints_callback (struct inferior_list_entry
*entry
, void *data
)
1473 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
1474 struct lwp_info
*event_lp
= data
;
1476 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1480 /* If a LWP other than the LWP that we're reporting an event for has
1481 hit a GDB breakpoint (as opposed to some random trap signal),
1482 then just arrange for it to hit it again later. We don't keep
1483 the SIGTRAP status and don't forward the SIGTRAP signal to the
1484 LWP. We will handle the current event, eventually we will resume
1485 all LWPs, and this one will get its breakpoint trap again.
1487 If we do not do this, then we run the risk that the user will
1488 delete or disable the breakpoint, but the LWP will have already
1491 if (lp
->last_resume_kind
!= resume_stop
1492 && get_lwp_thread (lp
)->last_status
.kind
== TARGET_WAITKIND_IGNORE
1493 && lp
->status_pending_p
1494 && WIFSTOPPED (lp
->status_pending
)
1495 && WSTOPSIG (lp
->status_pending
) == SIGTRAP
1497 && !lp
->stopped_by_watchpoint
1498 && cancel_breakpoint (lp
))
1499 /* Throw away the SIGTRAP. */
1500 lp
->status_pending_p
= 0;
1505 /* Select one LWP out of those that have events pending. */
1508 select_event_lwp (struct lwp_info
**orig_lp
)
1511 int random_selector
;
1512 struct lwp_info
*event_lp
;
1514 /* Give preference to any LWP that is being single-stepped. */
1516 = (struct lwp_info
*) find_inferior (&all_lwps
,
1517 select_singlestep_lwp_callback
, NULL
);
1518 if (event_lp
!= NULL
)
1522 "SEL: Select single-step %s\n",
1523 target_pid_to_str (ptid_of (event_lp
)));
1527 /* No single-stepping LWP. Select one at random, out of those
1528 which have had SIGTRAP events. */
1530 /* First see how many SIGTRAP events we have. */
1531 find_inferior (&all_lwps
, count_events_callback
, &num_events
);
1533 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1534 random_selector
= (int)
1535 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
1537 if (debug_threads
&& num_events
> 1)
1539 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1540 num_events
, random_selector
);
1542 event_lp
= (struct lwp_info
*) find_inferior (&all_lwps
,
1543 select_event_lwp_callback
,
1547 if (event_lp
!= NULL
)
1549 /* Switch the event LWP. */
1550 *orig_lp
= event_lp
;
1554 /* Set this inferior LWP's state as "want-stopped". We won't resume
1555 this LWP until the client gives us another action for it. */
1558 gdb_wants_lwp_stopped (struct inferior_list_entry
*entry
)
1560 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
1561 struct thread_info
*thread
= get_lwp_thread (lwp
);
1563 /* Most threads are stopped implicitly (all-stop); tag that with
1564 signal 0. The thread being explicitly reported stopped to the
1565 client, gets it's status fixed up afterwards. */
1566 thread
->last_status
.kind
= TARGET_WAITKIND_STOPPED
;
1567 thread
->last_status
.value
.sig
= TARGET_SIGNAL_0
;
1569 lwp
->last_resume_kind
= resume_stop
;
1572 /* Set all LWP's states as "want-stopped". */
1575 gdb_wants_all_stopped (void)
1577 for_each_inferior (&all_lwps
, gdb_wants_lwp_stopped
);
1580 /* Wait for process, returns status. */
1583 linux_wait_1 (ptid_t ptid
,
1584 struct target_waitstatus
*ourstatus
, int target_options
)
1587 struct thread_info
*thread
= NULL
;
1588 struct lwp_info
*event_child
= NULL
;
1591 int step_over_finished
;
1592 int bp_explains_trap
;
1593 int maybe_internal_trap
;
1596 /* Translate generic target options into linux options. */
1598 if (target_options
& TARGET_WNOHANG
)
1602 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
1604 /* If we were only supposed to resume one thread, only wait for
1605 that thread - if it's still alive. If it died, however - which
1606 can happen if we're coming from the thread death case below -
1607 then we need to make sure we restart the other threads. We could
1608 pick a thread at random or restart all; restarting all is less
1611 && !ptid_equal (cont_thread
, null_ptid
)
1612 && !ptid_equal (cont_thread
, minus_one_ptid
))
1614 thread
= (struct thread_info
*) find_inferior_id (&all_threads
,
1617 /* No stepping, no signal - unless one is pending already, of course. */
1620 struct thread_resume resume_info
;
1621 resume_info
.thread
= minus_one_ptid
;
1622 resume_info
.kind
= resume_continue
;
1623 resume_info
.sig
= 0;
1624 linux_resume (&resume_info
, 1);
1630 if (ptid_equal (step_over_bkpt
, null_ptid
))
1631 pid
= linux_wait_for_event (ptid
, &w
, options
);
1635 fprintf (stderr
, "step_over_bkpt set [%s], doing a blocking wait\n",
1636 target_pid_to_str (step_over_bkpt
));
1637 pid
= linux_wait_for_event (step_over_bkpt
, &w
, options
& ~WNOHANG
);
1640 if (pid
== 0) /* only if TARGET_WNOHANG */
1643 event_child
= get_thread_lwp (current_inferior
);
1645 /* If we are waiting for a particular child, and it exited,
1646 linux_wait_for_event will return its exit status. Similarly if
1647 the last child exited. If this is not the last child, however,
1648 do not report it as exited until there is a 'thread exited' response
1649 available in the remote protocol. Instead, just wait for another event.
1650 This should be safe, because if the thread crashed we will already
1651 have reported the termination signal to GDB; that should stop any
1652 in-progress stepping operations, etc.
1654 Report the exit status of the last thread to exit. This matches
1655 LinuxThreads' behavior. */
1657 if (last_thread_of_process_p (current_inferior
))
1659 if (WIFEXITED (w
) || WIFSIGNALED (w
))
1661 int pid
= pid_of (event_child
);
1662 struct process_info
*process
= find_process_pid (pid
);
1664 #ifdef USE_THREAD_DB
1665 thread_db_free (process
, 0);
1667 delete_lwp (event_child
);
1668 linux_remove_process (process
);
1670 current_inferior
= NULL
;
1674 ourstatus
->kind
= TARGET_WAITKIND_EXITED
;
1675 ourstatus
->value
.integer
= WEXITSTATUS (w
);
1678 fprintf (stderr
, "\nChild exited with retcode = %x \n", WEXITSTATUS (w
));
1682 ourstatus
->kind
= TARGET_WAITKIND_SIGNALLED
;
1683 ourstatus
->value
.sig
= target_signal_from_host (WTERMSIG (w
));
1686 fprintf (stderr
, "\nChild terminated with signal = %x \n", WTERMSIG (w
));
1690 return pid_to_ptid (pid
);
1695 if (!WIFSTOPPED (w
))
1699 /* If this event was not handled before, and is not a SIGTRAP, we
1700 report it. SIGILL and SIGSEGV are also treated as traps in case
1701 a breakpoint is inserted at the current PC. If this target does
1702 not support internal breakpoints at all, we also report the
1703 SIGTRAP without further processing; it's of no concern to us. */
1705 = (supports_breakpoints ()
1706 && (WSTOPSIG (w
) == SIGTRAP
1707 || ((WSTOPSIG (w
) == SIGILL
1708 || WSTOPSIG (w
) == SIGSEGV
)
1709 && (*the_low_target
.breakpoint_at
) (event_child
->stop_pc
))));
1711 if (maybe_internal_trap
)
1713 /* Handle anything that requires bookkeeping before deciding to
1714 report the event or continue waiting. */
1716 /* First check if we can explain the SIGTRAP with an internal
1717 breakpoint, or if we should possibly report the event to GDB.
1718 Do this before anything that may remove or insert a
1720 bp_explains_trap
= breakpoint_inserted_here (event_child
->stop_pc
);
1722 /* We have a SIGTRAP, possibly a step-over dance has just
1723 finished. If so, tweak the state machine accordingly,
1724 reinsert breakpoints and delete any reinsert (software
1725 single-step) breakpoints. */
1726 step_over_finished
= finish_step_over (event_child
);
1728 /* Now invoke the callbacks of any internal breakpoints there. */
1729 check_breakpoints (event_child
->stop_pc
);
1731 if (bp_explains_trap
)
1733 /* If we stepped or ran into an internal breakpoint, we've
1734 already handled it. So next time we resume (from this
1735 PC), we should step over it. */
1737 fprintf (stderr
, "Hit a gdbserver breakpoint.\n");
1739 if (breakpoint_here (event_child
->stop_pc
))
1740 event_child
->need_step_over
= 1;
1745 /* We have some other signal, possibly a step-over dance was in
1746 progress, and it should be cancelled too. */
1747 step_over_finished
= finish_step_over (event_child
);
1750 /* We have all the data we need. Either report the event to GDB, or
1751 resume threads and keep waiting for more. */
1753 /* Check If GDB would be interested in this event. If GDB wanted
1754 this thread to single step, we always want to report the SIGTRAP,
1755 and let GDB handle it. Watchpoints should always be reported.
1756 So should signals we can't explain. A SIGTRAP we can't explain
1757 could be a GDB breakpoint --- we may or not support Z0
1758 breakpoints. If we do, we're be able to handle GDB breakpoints
1759 on top of internal breakpoints, by handling the internal
1760 breakpoint and still reporting the event to GDB. If we don't,
1761 we're out of luck, GDB won't see the breakpoint hit. */
1762 report_to_gdb
= (!maybe_internal_trap
1763 || event_child
->last_resume_kind
== resume_step
1764 || event_child
->stopped_by_watchpoint
1765 || (!step_over_finished
&& !bp_explains_trap
)
1766 || gdb_breakpoint_here (event_child
->stop_pc
));
1768 /* We found no reason GDB would want us to stop. We either hit one
1769 of our own breakpoints, or finished an internal step GDB
1770 shouldn't know about. */
1775 if (bp_explains_trap
)
1776 fprintf (stderr
, "Hit a gdbserver breakpoint.\n");
1777 if (step_over_finished
)
1778 fprintf (stderr
, "Step-over finished.\n");
1781 /* We're not reporting this breakpoint to GDB, so apply the
1782 decr_pc_after_break adjustment to the inferior's regcache
1785 if (the_low_target
.set_pc
!= NULL
)
1787 struct regcache
*regcache
1788 = get_thread_regcache (get_lwp_thread (event_child
), 1);
1789 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
);
1792 /* We've finished stepping over a breakpoint. We've stopped all
1793 LWPs momentarily except the stepping one. This is where we
1794 resume them all again. We're going to keep waiting, so use
1795 proceed, which handles stepping over the next breakpoint. */
1797 fprintf (stderr
, "proceeding all threads.\n");
1798 proceed_all_lwps ();
1804 if (event_child
->last_resume_kind
== resume_step
)
1805 fprintf (stderr
, "GDB wanted to single-step, reporting event.\n");
1806 if (event_child
->stopped_by_watchpoint
)
1807 fprintf (stderr
, "Stopped by watchpoint.\n");
1808 if (gdb_breakpoint_here (event_child
->stop_pc
))
1809 fprintf (stderr
, "Stopped by GDB breakpoint.\n");
1811 fprintf (stderr
, "Hit a non-gdbserver trap event.\n");
1814 /* Alright, we're going to report a stop. */
1818 /* In all-stop, stop all threads. */
1821 /* If we're not waiting for a specific LWP, choose an event LWP
1822 from among those that have had events. Giving equal priority
1823 to all LWPs that have had events helps prevent
1825 if (ptid_equal (ptid
, minus_one_ptid
))
1827 event_child
->status_pending_p
= 1;
1828 event_child
->status_pending
= w
;
1830 select_event_lwp (&event_child
);
1832 event_child
->status_pending_p
= 0;
1833 w
= event_child
->status_pending
;
1836 /* Now that we've selected our final event LWP, cancel any
1837 breakpoints in other LWPs that have hit a GDB breakpoint.
1838 See the comment in cancel_breakpoints_callback to find out
1840 find_inferior (&all_lwps
, cancel_breakpoints_callback
, event_child
);
1844 /* If we just finished a step-over, then all threads had been
1845 momentarily paused. In all-stop, that's fine, we want
1846 threads stopped by now anyway. In non-stop, we need to
1847 re-resume threads that GDB wanted to be running. */
1848 if (step_over_finished
)
1849 unstop_all_lwps (event_child
);
1852 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
1854 /* Do this before the gdb_wants_all_stopped calls below, since they
1855 always set last_resume_kind to resume_stop. */
1856 if (event_child
->last_resume_kind
== resume_stop
&& WSTOPSIG (w
) == SIGSTOP
)
1858 /* A thread that has been requested to stop by GDB with vCont;t,
1859 and it stopped cleanly, so report as SIG0. The use of
1860 SIGSTOP is an implementation detail. */
1861 ourstatus
->value
.sig
= TARGET_SIGNAL_0
;
1863 else if (event_child
->last_resume_kind
== resume_stop
&& WSTOPSIG (w
) != SIGSTOP
)
1865 /* A thread that has been requested to stop by GDB with vCont;t,
1866 but, it stopped for other reasons. */
1867 ourstatus
->value
.sig
= target_signal_from_host (WSTOPSIG (w
));
1871 ourstatus
->value
.sig
= target_signal_from_host (WSTOPSIG (w
));
1874 gdb_assert (ptid_equal (step_over_bkpt
, null_ptid
));
1878 /* From GDB's perspective, all-stop mode always stops all
1879 threads implicitly. Tag all threads as "want-stopped". */
1880 gdb_wants_all_stopped ();
1884 /* We're reporting this LWP as stopped. Update it's
1885 "want-stopped" state to what the client wants, until it gets
1886 a new resume action. */
1887 gdb_wants_lwp_stopped (&event_child
->head
);
1891 fprintf (stderr
, "linux_wait ret = %s, %d, %d\n",
1892 target_pid_to_str (ptid_of (event_child
)),
1894 ourstatus
->value
.sig
);
1896 get_lwp_thread (event_child
)->last_status
= *ourstatus
;
1897 return ptid_of (event_child
);
1900 /* Get rid of any pending event in the pipe. */
1902 async_file_flush (void)
1908 ret
= read (linux_event_pipe
[0], &buf
, 1);
1909 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
1912 /* Put something in the pipe, so the event loop wakes up. */
1914 async_file_mark (void)
1918 async_file_flush ();
1921 ret
= write (linux_event_pipe
[1], "+", 1);
1922 while (ret
== 0 || (ret
== -1 && errno
== EINTR
));
1924 /* Ignore EAGAIN. If the pipe is full, the event loop will already
1925 be awakened anyway. */
1929 linux_wait (ptid_t ptid
,
1930 struct target_waitstatus
*ourstatus
, int target_options
)
1935 fprintf (stderr
, "linux_wait: [%s]\n", target_pid_to_str (ptid
));
1937 /* Flush the async file first. */
1938 if (target_is_async_p ())
1939 async_file_flush ();
1941 event_ptid
= linux_wait_1 (ptid
, ourstatus
, target_options
);
1943 /* If at least one stop was reported, there may be more. A single
1944 SIGCHLD can signal more than one child stop. */
1945 if (target_is_async_p ()
1946 && (target_options
& TARGET_WNOHANG
) != 0
1947 && !ptid_equal (event_ptid
, null_ptid
))
/* Send a signal to an LWP.  Prefers tkill (needed to target a
   specific thread under NPTL); falls back to kill once tkill is known
   to be unavailable.  Returns the syscall result (0 on success, -1
   with errno set on failure).  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef __NR_tkill
  {
    /* Remember a definitive ENOSYS so we only probe once.  */
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return ret;
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
1982 send_sigstop (struct inferior_list_entry
*entry
)
1984 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
1990 pid
= lwpid_of (lwp
);
1992 /* If we already have a pending stop signal for this process, don't
1994 if (lwp
->stop_expected
)
1997 fprintf (stderr
, "Have pending sigstop for lwp %d\n", pid
);
2003 fprintf (stderr
, "Sending sigstop to lwp %d\n", pid
);
2005 lwp
->stop_expected
= 1;
2006 kill_lwp (pid
, SIGSTOP
);
2010 mark_lwp_dead (struct lwp_info
*lwp
, int wstat
)
2012 /* It's dead, really. */
2015 /* Store the exit status for later. */
2016 lwp
->status_pending_p
= 1;
2017 lwp
->status_pending
= wstat
;
2019 /* Prevent trying to stop it. */
2022 /* No further stops are expected from a dead lwp. */
2023 lwp
->stop_expected
= 0;
2027 wait_for_sigstop (struct inferior_list_entry
*entry
)
2029 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2030 struct thread_info
*saved_inferior
;
2039 fprintf (stderr
, "wait_for_sigstop: LWP %ld already stopped\n",
2044 saved_inferior
= current_inferior
;
2045 if (saved_inferior
!= NULL
)
2046 saved_tid
= ((struct inferior_list_entry
*) saved_inferior
)->id
;
2048 saved_tid
= null_ptid
; /* avoid bogus unused warning */
2050 ptid
= lwp
->head
.id
;
2053 fprintf (stderr
, "wait_for_sigstop: pulling one event\n");
2055 pid
= linux_wait_for_event (ptid
, &wstat
, __WALL
);
2057 /* If we stopped with a non-SIGSTOP signal, save it for later
2058 and record the pending SIGSTOP. If the process exited, just
2060 if (WIFSTOPPED (wstat
))
2063 fprintf (stderr
, "LWP %ld stopped with signal %d\n",
2064 lwpid_of (lwp
), WSTOPSIG (wstat
));
2066 if (WSTOPSIG (wstat
) != SIGSTOP
)
2069 fprintf (stderr
, "LWP %ld stopped with non-sigstop status %06x\n",
2070 lwpid_of (lwp
), wstat
);
2072 lwp
->status_pending_p
= 1;
2073 lwp
->status_pending
= wstat
;
2079 fprintf (stderr
, "Process %d exited while stopping LWPs\n", pid
);
2081 lwp
= find_lwp_pid (pid_to_ptid (pid
));
2084 /* Leave this status pending for the next time we're able to
2085 report it. In the mean time, we'll report this lwp as
2086 dead to GDB, so GDB doesn't try to read registers and
2087 memory from it. This can only happen if this was the
2088 last thread of the process; otherwise, PID is removed
2089 from the thread tables before linux_wait_for_event
2091 mark_lwp_dead (lwp
, wstat
);
2095 if (saved_inferior
== NULL
|| linux_thread_alive (saved_tid
))
2096 current_inferior
= saved_inferior
;
2100 fprintf (stderr
, "Previously current thread died.\n");
2104 /* We can't change the current inferior behind GDB's back,
2105 otherwise, a subsequent command may apply to the wrong
2107 current_inferior
= NULL
;
2111 /* Set a valid thread as current. */
2112 set_desired_inferior (0);
2118 stop_all_lwps (void)
2120 stopping_threads
= 1;
2121 for_each_inferior (&all_lwps
, send_sigstop
);
2122 for_each_inferior (&all_lwps
, wait_for_sigstop
);
2123 stopping_threads
= 0;
2126 /* Resume execution of the inferior process.
2127 If STEP is nonzero, single-step it.
2128 If SIGNAL is nonzero, give it that signal. */
2131 linux_resume_one_lwp (struct lwp_info
*lwp
,
2132 int step
, int signal
, siginfo_t
*info
)
2134 struct thread_info
*saved_inferior
;
2136 if (lwp
->stopped
== 0)
2139 /* If we have pending signals or status, and a new signal, enqueue the
2140 signal. Also enqueue the signal if we are waiting to reinsert a
2141 breakpoint; it will be picked up again below. */
2143 && (lwp
->status_pending_p
|| lwp
->pending_signals
!= NULL
2144 || lwp
->bp_reinsert
!= 0))
2146 struct pending_signals
*p_sig
;
2147 p_sig
= xmalloc (sizeof (*p_sig
));
2148 p_sig
->prev
= lwp
->pending_signals
;
2149 p_sig
->signal
= signal
;
2151 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
2153 memcpy (&p_sig
->info
, info
, sizeof (siginfo_t
));
2154 lwp
->pending_signals
= p_sig
;
2157 if (lwp
->status_pending_p
)
2160 fprintf (stderr
, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2161 " has pending status\n",
2162 lwpid_of (lwp
), step
? "step" : "continue", signal
,
2163 lwp
->stop_expected
? "expected" : "not expected");
2167 saved_inferior
= current_inferior
;
2168 current_inferior
= get_lwp_thread (lwp
);
2171 fprintf (stderr
, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2172 lwpid_of (lwp
), step
? "step" : "continue", signal
,
2173 lwp
->stop_expected
? "expected" : "not expected");
2175 /* This bit needs some thinking about. If we get a signal that
2176 we must report while a single-step reinsert is still pending,
2177 we often end up resuming the thread. It might be better to
2178 (ew) allow a stack of pending events; then we could be sure that
2179 the reinsert happened right away and not lose any signals.
2181 Making this stack would also shrink the window in which breakpoints are
2182 uninserted (see comment in linux_wait_for_lwp) but not enough for
2183 complete correctness, so it won't solve that problem. It may be
2184 worthwhile just to solve this one, however. */
2185 if (lwp
->bp_reinsert
!= 0)
2188 fprintf (stderr
, " pending reinsert at 0x%s\n",
2189 paddress (lwp
->bp_reinsert
));
2191 if (lwp
->bp_reinsert
!= 0 && can_hardware_single_step ())
2194 fprintf (stderr
, "BAD - reinserting but not stepping.\n");
2199 /* Postpone any pending signal. It was enqueued above. */
2203 if (debug_threads
&& the_low_target
.get_pc
!= NULL
)
2205 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 1);
2206 CORE_ADDR pc
= (*the_low_target
.get_pc
) (regcache
);
2207 fprintf (stderr
, " resuming from pc 0x%lx\n", (long) pc
);
2210 /* If we have pending signals, consume one unless we are trying to reinsert
2212 if (lwp
->pending_signals
!= NULL
&& lwp
->bp_reinsert
== 0)
2214 struct pending_signals
**p_sig
;
2216 p_sig
= &lwp
->pending_signals
;
2217 while ((*p_sig
)->prev
!= NULL
)
2218 p_sig
= &(*p_sig
)->prev
;
2220 signal
= (*p_sig
)->signal
;
2221 if ((*p_sig
)->info
.si_signo
!= 0)
2222 ptrace (PTRACE_SETSIGINFO
, lwpid_of (lwp
), 0, &(*p_sig
)->info
);
2228 if (the_low_target
.prepare_to_resume
!= NULL
)
2229 the_low_target
.prepare_to_resume (lwp
);
2231 regcache_invalidate_one ((struct inferior_list_entry
*)
2232 get_lwp_thread (lwp
));
2235 lwp
->stopped_by_watchpoint
= 0;
2236 lwp
->stepping
= step
;
2237 ptrace (step
? PTRACE_SINGLESTEP
: PTRACE_CONT
, lwpid_of (lwp
), 0,
2238 /* Coerce to a uintptr_t first to avoid potential gcc warning
2239 of coercing an 8 byte integer to a 4 byte pointer. */
2240 (PTRACE_ARG4_TYPE
) (uintptr_t) signal
);
2242 current_inferior
= saved_inferior
;
2245 /* ESRCH from ptrace either means that the thread was already
2246 running (an error) or that it is gone (a race condition). If
2247 it's gone, we will get a notification the next time we wait,
2248 so we can ignore the error. We could differentiate these
2249 two, but it's tricky without waiting; the thread still exists
2250 as a zombie, so sending it signal 0 would succeed. So just
2255 perror_with_name ("ptrace");
/* A bundle of a resume-request array and its length, passed through
   find_inferior's void* argument to linux_set_resume_request.  */

struct thread_resume_array
{
  struct thread_resume *resume;
  /* NOTE(review): the count field's line is missing from the mangled
     source; restored from upstream (iterated as `r->n') — confirm.  */
  size_t n;
};
2265 /* This function is called once per thread. We look up the thread
2266 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2269 This algorithm is O(threads * resume elements), but resume elements
2270 is small (and will remain small at least until GDB supports thread
2273 linux_set_resume_request (struct inferior_list_entry
*entry
, void *arg
)
2275 struct lwp_info
*lwp
;
2276 struct thread_info
*thread
;
2278 struct thread_resume_array
*r
;
2280 thread
= (struct thread_info
*) entry
;
2281 lwp
= get_thread_lwp (thread
);
2284 for (ndx
= 0; ndx
< r
->n
; ndx
++)
2286 ptid_t ptid
= r
->resume
[ndx
].thread
;
2287 if (ptid_equal (ptid
, minus_one_ptid
)
2288 || ptid_equal (ptid
, entry
->id
)
2289 || (ptid_is_pid (ptid
)
2290 && (ptid_get_pid (ptid
) == pid_of (lwp
)))
2291 || (ptid_get_lwp (ptid
) == -1
2292 && (ptid_get_pid (ptid
) == pid_of (lwp
))))
2294 if (r
->resume
[ndx
].kind
== resume_stop
2295 && lwp
->last_resume_kind
== resume_stop
)
2298 fprintf (stderr
, "already %s LWP %ld at GDB's request\n",
2299 thread
->last_status
.kind
== TARGET_WAITKIND_STOPPED
2307 lwp
->resume
= &r
->resume
[ndx
];
2308 lwp
->last_resume_kind
= lwp
->resume
->kind
;
2313 /* No resume action for this thread. */
2320 /* Set *FLAG_P if this lwp has an interesting status pending. */
2322 resume_status_pending_p (struct inferior_list_entry
*entry
, void *flag_p
)
2324 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2326 /* LWPs which will not be resumed are not interesting, because
2327 we might not wait for them next time through linux_wait. */
2328 if (lwp
->resume
== NULL
)
2331 if (lwp
->status_pending_p
)
2332 * (int *) flag_p
= 1;
2337 /* Return 1 if this lwp that GDB wants running is stopped at an
2338 internal breakpoint that we need to step over. It assumes that any
2339 required STOP_PC adjustment has already been propagated to the
2340 inferior's regcache. */
2343 need_step_over_p (struct inferior_list_entry
*entry
, void *dummy
)
2345 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2346 struct thread_info
*saved_inferior
;
2349 /* LWPs which will not be resumed are not interesting, because we
2350 might not wait for them next time through linux_wait. */
2356 "Need step over [LWP %ld]? Ignoring, not stopped\n",
2361 if (lwp
->last_resume_kind
== resume_stop
)
2365 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
2370 if (!lwp
->need_step_over
)
2374 "Need step over [LWP %ld]? No\n", lwpid_of (lwp
));
2377 if (lwp
->status_pending_p
)
2381 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
2386 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
2390 /* If the PC has changed since we stopped, then don't do anything,
2391 and let the breakpoint/tracepoint be hit. This happens if, for
2392 instance, GDB handled the decr_pc_after_break subtraction itself,
2393 GDB is OOL stepping this thread, or the user has issued a "jump"
2394 command, or poked thread's registers herself. */
2395 if (pc
!= lwp
->stop_pc
)
2399 "Need step over [LWP %ld]? Cancelling, PC was changed. "
2400 "Old stop_pc was 0x%s, PC is now 0x%s\n",
2401 lwpid_of (lwp
), paddress (lwp
->stop_pc
), paddress (pc
));
2403 lwp
->need_step_over
= 0;
2407 saved_inferior
= current_inferior
;
2408 current_inferior
= get_lwp_thread (lwp
);
2410 /* We can only step over breakpoints we know about. */
2411 if (breakpoint_here (pc
))
2413 /* Don't step over a breakpoint that GDB expects to hit
2415 if (gdb_breakpoint_here (pc
))
2419 "Need step over [LWP %ld]? yes, but found"
2420 " GDB breakpoint at 0x%s; skipping step over\n",
2421 lwpid_of (lwp
), paddress (pc
));
2423 current_inferior
= saved_inferior
;
2430 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
2431 lwpid_of (lwp
), paddress (pc
));
2433 /* We've found an lwp that needs stepping over --- return 1 so
2434 that find_inferior stops looking. */
2435 current_inferior
= saved_inferior
;
2437 /* If the step over is cancelled, this is set again. */
2438 lwp
->need_step_over
= 0;
2443 current_inferior
= saved_inferior
;
2447 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
2448 lwpid_of (lwp
), paddress (pc
));
2453 /* Start a step-over operation on LWP. When LWP stopped at a
2454 breakpoint, to make progress, we need to remove the breakpoint out
2455 of the way. If we let other threads run while we do that, they may
2456 pass by the breakpoint location and miss hitting it. To avoid
2457 that, a step-over momentarily stops all threads while LWP is
2458 single-stepped while the breakpoint is temporarily uninserted from
2459 the inferior. When the single-step finishes, we reinsert the
2460 breakpoint, and let all threads that are supposed to be running,
2463 On targets that don't support hardware single-step, we don't
2464 currently support full software single-stepping. Instead, we only
2465 support stepping over the thread event breakpoint, by asking the
2466 low target where to place a reinsert breakpoint. Since this
2467 routine assumes the breakpoint being stepped over is a thread event
2468 breakpoint, it usually assumes the return address of the current
2469 function is a good enough place to set the reinsert breakpoint. */
2472 start_step_over (struct lwp_info
*lwp
)
2474 struct thread_info
*saved_inferior
;
2480 "Starting step-over on LWP %ld. Stopping all threads\n",
2486 fprintf (stderr
, "Done stopping all threads for step-over.\n");
2488 /* Note, we should always reach here with an already adjusted PC,
2489 either by GDB (if we're resuming due to GDB's request), or by our
2490 caller, if we just finished handling an internal breakpoint GDB
2491 shouldn't care about. */
2494 saved_inferior
= current_inferior
;
2495 current_inferior
= get_lwp_thread (lwp
);
2497 lwp
->bp_reinsert
= pc
;
2498 uninsert_breakpoints_at (pc
);
2500 if (can_hardware_single_step ())
2506 CORE_ADDR raddr
= (*the_low_target
.breakpoint_reinsert_addr
) ();
2507 set_reinsert_breakpoint (raddr
);
2511 current_inferior
= saved_inferior
;
2513 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
2515 /* Require next event from this LWP. */
2516 step_over_bkpt
= lwp
->head
.id
;
2520 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
2521 start_step_over, if still there, and delete any reinsert
2522 breakpoints we've set, on non hardware single-step targets. */
2525 finish_step_over (struct lwp_info
*lwp
)
2527 if (lwp
->bp_reinsert
!= 0)
2530 fprintf (stderr
, "Finished step over.\n");
2532 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
2533 may be no breakpoint to reinsert there by now. */
2534 reinsert_breakpoints_at (lwp
->bp_reinsert
);
2536 lwp
->bp_reinsert
= 0;
2538 /* Delete any software-single-step reinsert breakpoints. No
2539 longer needed. We don't have to worry about other threads
2540 hitting this trap, and later not being able to explain it,
2541 because we were stepping over a breakpoint, and we hold all
2542 threads but LWP stopped while doing that. */
2543 if (!can_hardware_single_step ())
2544 delete_reinsert_breakpoints ();
2546 step_over_bkpt
= null_ptid
;
2553 /* This function is called once per thread. We check the thread's resume
2554 request, which will tell us whether to resume, step, or leave the thread
2555 stopped; and what signal, if any, it should be sent.
2557 For threads which we aren't explicitly told otherwise, we preserve
2558 the stepping flag; this is used for stepping over gdbserver-placed
2561 If pending_flags was set in any thread, we queue any needed
2562 signals, since we won't actually resume. We already have a pending
2563 event to report, so we don't need to preserve any step requests;
2564 they should be re-issued if necessary. */
2567 linux_resume_one_thread (struct inferior_list_entry
*entry
, void *arg
)
2569 struct lwp_info
*lwp
;
2570 struct thread_info
*thread
;
2572 int leave_all_stopped
= * (int *) arg
;
2575 thread
= (struct thread_info
*) entry
;
2576 lwp
= get_thread_lwp (thread
);
2578 if (lwp
->resume
== NULL
)
2581 if (lwp
->resume
->kind
== resume_stop
)
2584 fprintf (stderr
, "resume_stop request for LWP %ld\n", lwpid_of (lwp
));
2589 fprintf (stderr
, "stopping LWP %ld\n", lwpid_of (lwp
));
2591 /* Stop the thread, and wait for the event asynchronously,
2592 through the event loop. */
2593 send_sigstop (&lwp
->head
);
2598 fprintf (stderr
, "already stopped LWP %ld\n",
2601 /* The LWP may have been stopped in an internal event that
2602 was not meant to be notified back to GDB (e.g., gdbserver
2603 breakpoint), so we should be reporting a stop event in
2606 /* If the thread already has a pending SIGSTOP, this is a
2607 no-op. Otherwise, something later will presumably resume
2608 the thread and this will cause it to cancel any pending
2609 operation, due to last_resume_kind == resume_stop. If
2610 the thread already has a pending status to report, we
2611 will still report it the next time we wait - see
2612 status_pending_p_callback. */
2613 send_sigstop (&lwp
->head
);
2616 /* For stop requests, we're done. */
2618 get_lwp_thread (lwp
)->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
2622 /* If this thread which is about to be resumed has a pending status,
2623 then don't resume any threads - we can just report the pending
2624 status. Make sure to queue any signals that would otherwise be
2625 sent. In all-stop mode, we do this decision based on if *any*
2626 thread has a pending status. If there's a thread that needs the
2627 step-over-breakpoint dance, then don't resume any other thread
2628 but that particular one. */
2629 leave_pending
= (lwp
->status_pending_p
|| leave_all_stopped
);
2634 fprintf (stderr
, "resuming LWP %ld\n", lwpid_of (lwp
));
2636 step
= (lwp
->resume
->kind
== resume_step
);
2637 linux_resume_one_lwp (lwp
, step
, lwp
->resume
->sig
, NULL
);
2638 get_lwp_thread (lwp
)->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
2643 fprintf (stderr
, "leaving LWP %ld stopped\n", lwpid_of (lwp
));
2645 /* If we have a new signal, enqueue the signal. */
2646 if (lwp
->resume
->sig
!= 0)
2648 struct pending_signals
*p_sig
;
2649 p_sig
= xmalloc (sizeof (*p_sig
));
2650 p_sig
->prev
= lwp
->pending_signals
;
2651 p_sig
->signal
= lwp
->resume
->sig
;
2652 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
2654 /* If this is the same signal we were previously stopped by,
2655 make sure to queue its siginfo. We can ignore the return
2656 value of ptrace; if it fails, we'll skip
2657 PTRACE_SETSIGINFO. */
2658 if (WIFSTOPPED (lwp
->last_status
)
2659 && WSTOPSIG (lwp
->last_status
) == lwp
->resume
->sig
)
2660 ptrace (PTRACE_GETSIGINFO
, lwpid_of (lwp
), 0, &p_sig
->info
);
2662 lwp
->pending_signals
= p_sig
;
2671 linux_resume (struct thread_resume
*resume_info
, size_t n
)
2673 struct thread_resume_array array
= { resume_info
, n
};
2674 struct lwp_info
*need_step_over
= NULL
;
2676 int leave_all_stopped
;
2678 find_inferior (&all_threads
, linux_set_resume_request
, &array
);
2680 /* If there is a thread which would otherwise be resumed, which has
2681 a pending status, then don't resume any threads - we can just
2682 report the pending status. Make sure to queue any signals that
2683 would otherwise be sent. In non-stop mode, we'll apply this
2684 logic to each thread individually. We consume all pending events
2685 before considering to start a step-over (in all-stop). */
2688 find_inferior (&all_lwps
, resume_status_pending_p
, &any_pending
);
2690 /* If there is a thread which would otherwise be resumed, which is
2691 stopped at a breakpoint that needs stepping over, then don't
2692 resume any threads - have it step over the breakpoint with all
2693 other threads stopped, then resume all threads again. Make sure
2694 to queue any signals that would otherwise be delivered or
2696 if (!any_pending
&& supports_breakpoints ())
2698 = (struct lwp_info
*) find_inferior (&all_lwps
,
2699 need_step_over_p
, NULL
);
2701 leave_all_stopped
= (need_step_over
!= NULL
|| any_pending
);
2705 if (need_step_over
!= NULL
)
2706 fprintf (stderr
, "Not resuming all, need step over\n");
2707 else if (any_pending
)
2709 "Not resuming, all-stop and found "
2710 "an LWP with pending status\n");
2712 fprintf (stderr
, "Resuming, no pending status or step over needed\n");
2715 /* Even if we're leaving threads stopped, queue all signals we'd
2716 otherwise deliver. */
2717 find_inferior (&all_threads
, linux_resume_one_thread
, &leave_all_stopped
);
2720 start_step_over (need_step_over
);
2723 /* This function is called once per thread. We check the thread's
2724 last resume request, which will tell us whether to resume, step, or
2725 leave the thread stopped. Any signal the client requested to be
2726 delivered has already been enqueued at this point.
2728 If any thread that GDB wants running is stopped at an internal
2729 breakpoint that needs stepping over, we start a step-over operation
2730 on that particular thread, and leave all others stopped. */
2733 proceed_one_lwp (struct inferior_list_entry
*entry
)
2735 struct lwp_info
*lwp
;
2738 lwp
= (struct lwp_info
*) entry
;
2742 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp
));
2747 fprintf (stderr
, " LWP %ld already running\n", lwpid_of (lwp
));
2751 if (lwp
->last_resume_kind
== resume_stop
)
2754 fprintf (stderr
, " client wants LWP %ld stopped\n", lwpid_of (lwp
));
2758 if (lwp
->status_pending_p
)
2761 fprintf (stderr
, " LWP %ld has pending status, leaving stopped\n",
2769 fprintf (stderr
, " LWP %ld is suspended\n", lwpid_of (lwp
));
2773 step
= lwp
->last_resume_kind
== resume_step
;
2774 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
2777 /* When we finish a step-over, set threads running again. If there's
2778 another thread that may need a step-over, now's the time to start
2779 it. Eventually, we'll move all threads past their breakpoints. */
2782 proceed_all_lwps (void)
2784 struct lwp_info
*need_step_over
;
2786 /* If there is a thread which would otherwise be resumed, which is
2787 stopped at a breakpoint that needs stepping over, then don't
2788 resume any threads - have it step over the breakpoint with all
2789 other threads stopped, then resume all threads again. */
2791 if (supports_breakpoints ())
2794 = (struct lwp_info
*) find_inferior (&all_lwps
,
2795 need_step_over_p
, NULL
);
2797 if (need_step_over
!= NULL
)
2800 fprintf (stderr
, "proceed_all_lwps: found "
2801 "thread %ld needing a step-over\n",
2802 lwpid_of (need_step_over
));
2804 start_step_over (need_step_over
);
2810 fprintf (stderr
, "Proceeding, no step-over needed\n");
2812 for_each_inferior (&all_lwps
, proceed_one_lwp
);
2815 /* Stopped LWPs that the client wanted to be running, that don't have
2816 pending statuses, are set to run again, except for EXCEPT, if not
2817 NULL. This undoes a stop_all_lwps call. */
2820 unstop_all_lwps (struct lwp_info
*except
)
2826 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except
));
2829 "unstopping all lwps\n");
2832 /* Make sure proceed_one_lwp doesn't try to resume this thread. */
2834 ++except
->suspended
;
2836 for_each_inferior (&all_lwps
, proceed_one_lwp
);
2839 --except
->suspended
;
2842 #ifdef HAVE_LINUX_USRREGS
2845 register_addr (int regnum
)
2849 if (regnum
< 0 || regnum
>= the_low_target
.num_regs
)
2850 error ("Invalid register number %d.", regnum
);
2852 addr
= the_low_target
.regmap
[regnum
];
2857 /* Fetch one register. */
2859 fetch_register (struct regcache
*regcache
, int regno
)
2866 if (regno
>= the_low_target
.num_regs
)
2868 if ((*the_low_target
.cannot_fetch_register
) (regno
))
2871 regaddr
= register_addr (regno
);
2875 pid
= lwpid_of (get_thread_lwp (current_inferior
));
2876 size
= ((register_size (regno
) + sizeof (PTRACE_XFER_TYPE
) - 1)
2877 & - sizeof (PTRACE_XFER_TYPE
));
2878 buf
= alloca (size
);
2879 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
2882 *(PTRACE_XFER_TYPE
*) (buf
+ i
) =
2883 ptrace (PTRACE_PEEKUSER
, pid
,
2884 /* Coerce to a uintptr_t first to avoid potential gcc warning
2885 of coercing an 8 byte integer to a 4 byte pointer. */
2886 (PTRACE_ARG3_TYPE
) (uintptr_t) regaddr
, 0);
2887 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
2889 error ("reading register %d: %s", regno
, strerror (errno
));
2892 if (the_low_target
.supply_ptrace_register
)
2893 the_low_target
.supply_ptrace_register (regcache
, regno
, buf
);
2895 supply_register (regcache
, regno
, buf
);
2898 /* Fetch all registers, or just one, from the child process. */
2900 usr_fetch_inferior_registers (struct regcache
*regcache
, int regno
)
2903 for (regno
= 0; regno
< the_low_target
.num_regs
; regno
++)
2904 fetch_register (regcache
, regno
);
2906 fetch_register (regcache
, regno
);
2909 /* Store our register values back into the inferior.
2910 If REGNO is -1, do this for all registers.
2911 Otherwise, REGNO specifies which register (so we can save time). */
2913 usr_store_inferior_registers (struct regcache
*regcache
, int regno
)
2922 if (regno
>= the_low_target
.num_regs
)
2925 if ((*the_low_target
.cannot_store_register
) (regno
) == 1)
2928 regaddr
= register_addr (regno
);
2932 size
= (register_size (regno
) + sizeof (PTRACE_XFER_TYPE
) - 1)
2933 & - sizeof (PTRACE_XFER_TYPE
);
2934 buf
= alloca (size
);
2935 memset (buf
, 0, size
);
2937 if (the_low_target
.collect_ptrace_register
)
2938 the_low_target
.collect_ptrace_register (regcache
, regno
, buf
);
2940 collect_register (regcache
, regno
, buf
);
2942 pid
= lwpid_of (get_thread_lwp (current_inferior
));
2943 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
2946 ptrace (PTRACE_POKEUSER
, pid
,
2947 /* Coerce to a uintptr_t first to avoid potential gcc warning
2948 about coercing an 8 byte integer to a 4 byte pointer. */
2949 (PTRACE_ARG3_TYPE
) (uintptr_t) regaddr
,
2950 (PTRACE_ARG4_TYPE
) *(PTRACE_XFER_TYPE
*) (buf
+ i
));
2953 /* At this point, ESRCH should mean the process is
2954 already gone, in which case we simply ignore attempts
2955 to change its registers. See also the related
2956 comment in linux_resume_one_lwp. */
2960 if ((*the_low_target
.cannot_store_register
) (regno
) == 0)
2961 error ("writing register %d: %s", regno
, strerror (errno
));
2963 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
2967 for (regno
= 0; regno
< the_low_target
.num_regs
; regno
++)
2968 usr_store_inferior_registers (regcache
, regno
);
2970 #endif /* HAVE_LINUX_USRREGS */
2974 #ifdef HAVE_LINUX_REGSETS
2977 regsets_fetch_inferior_registers (struct regcache
*regcache
)
2979 struct regset_info
*regset
;
2980 int saw_general_regs
= 0;
2983 regset
= target_regsets
;
2985 pid
= lwpid_of (get_thread_lwp (current_inferior
));
2986 while (regset
->size
>= 0)
2991 if (regset
->size
== 0 || disabled_regsets
[regset
- target_regsets
])
2997 buf
= xmalloc (regset
->size
);
2999 res
= ptrace (regset
->get_request
, pid
, 0, buf
);
3001 res
= ptrace (regset
->get_request
, pid
, buf
, 0);
3007 /* If we get EIO on a regset, do not try it again for
3009 disabled_regsets
[regset
- target_regsets
] = 1;
3016 sprintf (s
, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3021 else if (regset
->type
== GENERAL_REGS
)
3022 saw_general_regs
= 1;
3023 regset
->store_function (regcache
, buf
);
3027 if (saw_general_regs
)
3034 regsets_store_inferior_registers (struct regcache
*regcache
)
3036 struct regset_info
*regset
;
3037 int saw_general_regs
= 0;
3040 regset
= target_regsets
;
3042 pid
= lwpid_of (get_thread_lwp (current_inferior
));
3043 while (regset
->size
>= 0)
3048 if (regset
->size
== 0 || disabled_regsets
[regset
- target_regsets
])
3054 buf
= xmalloc (regset
->size
);
3056 /* First fill the buffer with the current register set contents,
3057 in case there are any items in the kernel's regset that are
3058 not in gdbserver's regcache. */
3060 res
= ptrace (regset
->get_request
, pid
, 0, buf
);
3062 res
= ptrace (regset
->get_request
, pid
, buf
, 0);
3067 /* Then overlay our cached registers on that. */
3068 regset
->fill_function (regcache
, buf
);
3070 /* Only now do we write the register set. */
3072 res
= ptrace (regset
->set_request
, pid
, 0, buf
);
3074 res
= ptrace (regset
->set_request
, pid
, buf
, 0);
3082 /* If we get EIO on a regset, do not try it again for
3084 disabled_regsets
[regset
- target_regsets
] = 1;
3088 else if (errno
== ESRCH
)
3090 /* At this point, ESRCH should mean the process is
3091 already gone, in which case we simply ignore attempts
3092 to change its registers. See also the related
3093 comment in linux_resume_one_lwp. */
3099 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3102 else if (regset
->type
== GENERAL_REGS
)
3103 saw_general_regs
= 1;
3107 if (saw_general_regs
)
3114 #endif /* HAVE_LINUX_REGSETS */
3118 linux_fetch_registers (struct regcache
*regcache
, int regno
)
3120 #ifdef HAVE_LINUX_REGSETS
3121 if (regsets_fetch_inferior_registers (regcache
) == 0)
3124 #ifdef HAVE_LINUX_USRREGS
3125 usr_fetch_inferior_registers (regcache
, regno
);
3130 linux_store_registers (struct regcache
*regcache
, int regno
)
3132 #ifdef HAVE_LINUX_REGSETS
3133 if (regsets_store_inferior_registers (regcache
) == 0)
3136 #ifdef HAVE_LINUX_USRREGS
3137 usr_store_inferior_registers (regcache
, regno
);
3142 /* Copy LEN bytes from inferior's memory starting at MEMADDR
3143 to debugger memory starting at MYADDR. */
3146 linux_read_memory (CORE_ADDR memaddr
, unsigned char *myaddr
, int len
)
3149 /* Round starting address down to longword boundary. */
3150 register CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
3151 /* Round ending address up; get number of longwords that makes. */
3153 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
3154 / sizeof (PTRACE_XFER_TYPE
);
3155 /* Allocate buffer of that many longwords. */
3156 register PTRACE_XFER_TYPE
*buffer
3157 = (PTRACE_XFER_TYPE
*) alloca (count
* sizeof (PTRACE_XFER_TYPE
));
3160 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
3162 /* Try using /proc. Don't bother for one word. */
3163 if (len
>= 3 * sizeof (long))
3165 /* We could keep this file open and cache it - possibly one per
3166 thread. That requires some juggling, but is even faster. */
3167 sprintf (filename
, "/proc/%d/mem", pid
);
3168 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
3172 /* If pread64 is available, use it. It's faster if the kernel
3173 supports it (only one syscall), and it's 64-bit safe even on
3174 32-bit platforms (for instance, SPARC debugging a SPARC64
3177 if (pread64 (fd
, myaddr
, len
, memaddr
) != len
)
3179 if (lseek (fd
, memaddr
, SEEK_SET
) == -1 || read (fd
, myaddr
, len
) != len
)
3191 /* Read all the longwords */
3192 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
3195 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3196 about coercing an 8 byte integer to a 4 byte pointer. */
3197 buffer
[i
] = ptrace (PTRACE_PEEKTEXT
, pid
,
3198 (PTRACE_ARG3_TYPE
) (uintptr_t) addr
, 0);
3203 /* Copy appropriate bytes out of the buffer. */
3205 (char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
3211 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3212 memory at MEMADDR. On failure (cannot write to the inferior)
3213 returns the value of errno. */
3216 linux_write_memory (CORE_ADDR memaddr
, const unsigned char *myaddr
, int len
)
3219 /* Round starting address down to longword boundary. */
3220 register CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
3221 /* Round ending address up; get number of longwords that makes. */
3223 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1) / sizeof (PTRACE_XFER_TYPE
);
3224 /* Allocate buffer of that many longwords. */
3225 register PTRACE_XFER_TYPE
*buffer
= (PTRACE_XFER_TYPE
*) alloca (count
* sizeof (PTRACE_XFER_TYPE
));
3226 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
3230 /* Dump up to four bytes. */
3231 unsigned int val
= * (unsigned int *) myaddr
;
3237 val
= val
& 0xffffff;
3238 fprintf (stderr
, "Writing %0*x to 0x%08lx\n", 2 * ((len
< 4) ? len
: 4),
3239 val
, (long)memaddr
);
3242 /* Fill start and end extra bytes of buffer with existing memory data. */
3245 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3246 about coercing an 8 byte integer to a 4 byte pointer. */
3247 buffer
[0] = ptrace (PTRACE_PEEKTEXT
, pid
,
3248 (PTRACE_ARG3_TYPE
) (uintptr_t) addr
, 0);
3256 = ptrace (PTRACE_PEEKTEXT
, pid
,
3257 /* Coerce to a uintptr_t first to avoid potential gcc warning
3258 about coercing an 8 byte integer to a 4 byte pointer. */
3259 (PTRACE_ARG3_TYPE
) (uintptr_t) (addr
+ (count
- 1)
3260 * sizeof (PTRACE_XFER_TYPE
)),
3266 /* Copy data to be written over corresponding part of buffer. */
3268 memcpy ((char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)), myaddr
, len
);
3270 /* Write the entire buffer. */
3272 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
3275 ptrace (PTRACE_POKETEXT
, pid
,
3276 /* Coerce to a uintptr_t first to avoid potential gcc warning
3277 about coercing an 8 byte integer to a 4 byte pointer. */
3278 (PTRACE_ARG3_TYPE
) (uintptr_t) addr
,
3279 (PTRACE_ARG4_TYPE
) buffer
[i
]);
3287 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
3288 static int linux_supports_tracefork_flag
;
3290 /* Helper functions for linux_test_for_tracefork, called via clone (). */
3293 linux_tracefork_grandchild (void *arg
)
3298 #define STACK_SIZE 4096
3301 linux_tracefork_child (void *arg
)
3303 ptrace (PTRACE_TRACEME
, 0, 0, 0);
3304 kill (getpid (), SIGSTOP
);
3306 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3309 linux_tracefork_grandchild (NULL
);
3311 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3314 __clone2 (linux_tracefork_grandchild
, arg
, STACK_SIZE
,
3315 CLONE_VM
| SIGCHLD
, NULL
);
3317 clone (linux_tracefork_grandchild
, arg
+ STACK_SIZE
,
3318 CLONE_VM
| SIGCHLD
, NULL
);
3321 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3326 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
3327 sure that we can enable the option, and that it had the desired
3331 linux_test_for_tracefork (void)
3333 int child_pid
, ret
, status
;
3335 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3336 char *stack
= xmalloc (STACK_SIZE
* 4);
3337 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3339 linux_supports_tracefork_flag
= 0;
3341 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
3343 child_pid
= fork ();
3345 linux_tracefork_child (NULL
);
3347 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3349 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
3351 child_pid
= __clone2 (linux_tracefork_child
, stack
, STACK_SIZE
,
3352 CLONE_VM
| SIGCHLD
, stack
+ STACK_SIZE
* 2);
3353 #else /* !__ia64__ */
3354 child_pid
= clone (linux_tracefork_child
, stack
+ STACK_SIZE
,
3355 CLONE_VM
| SIGCHLD
, stack
+ STACK_SIZE
* 2);
3356 #endif /* !__ia64__ */
3358 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3360 if (child_pid
== -1)
3361 perror_with_name ("clone");
3363 ret
= my_waitpid (child_pid
, &status
, 0);
3365 perror_with_name ("waitpid");
3366 else if (ret
!= child_pid
)
3367 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret
);
3368 if (! WIFSTOPPED (status
))
3369 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status
);
3371 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0,
3372 (PTRACE_ARG4_TYPE
) PTRACE_O_TRACEFORK
);
3375 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
3378 warning ("linux_test_for_tracefork: failed to kill child");
3382 ret
= my_waitpid (child_pid
, &status
, 0);
3383 if (ret
!= child_pid
)
3384 warning ("linux_test_for_tracefork: failed to wait for killed child");
3385 else if (!WIFSIGNALED (status
))
3386 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
3387 "killed child", status
);
3392 ret
= ptrace (PTRACE_CONT
, child_pid
, 0, 0);
3394 warning ("linux_test_for_tracefork: failed to resume child");
3396 ret
= my_waitpid (child_pid
, &status
, 0);
3398 if (ret
== child_pid
&& WIFSTOPPED (status
)
3399 && status
>> 16 == PTRACE_EVENT_FORK
)
3402 ret
= ptrace (PTRACE_GETEVENTMSG
, child_pid
, 0, &second_pid
);
3403 if (ret
== 0 && second_pid
!= 0)
3407 linux_supports_tracefork_flag
= 1;
3408 my_waitpid (second_pid
, &second_status
, 0);
3409 ret
= ptrace (PTRACE_KILL
, second_pid
, 0, 0);
3411 warning ("linux_test_for_tracefork: failed to kill second child");
3412 my_waitpid (second_pid
, &status
, 0);
3416 warning ("linux_test_for_tracefork: unexpected result from waitpid "
3417 "(%d, status 0x%x)", ret
, status
);
3421 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
3423 warning ("linux_test_for_tracefork: failed to kill child");
3424 my_waitpid (child_pid
, &status
, 0);
3426 while (WIFSTOPPED (status
));
3428 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3430 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
3435 linux_look_up_symbols (void)
3437 #ifdef USE_THREAD_DB
3438 struct process_info
*proc
= current_process ();
3440 if (proc
->private->thread_db
!= NULL
)
3443 /* If the kernel supports tracing forks then it also supports tracing
3444 clones, and then we don't need to use the magic thread event breakpoint
3445 to learn about threads. */
3446 thread_db_init (!linux_supports_tracefork_flag
);
3451 linux_request_interrupt (void)
3453 extern unsigned long signal_pid
;
3455 if (!ptid_equal (cont_thread
, null_ptid
)
3456 && !ptid_equal (cont_thread
, minus_one_ptid
))
3458 struct lwp_info
*lwp
;
3461 lwp
= get_thread_lwp (current_inferior
);
3462 lwpid
= lwpid_of (lwp
);
3463 kill_lwp (lwpid
, SIGINT
);
3466 kill_lwp (signal_pid
, SIGINT
);
3469 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
3470 to debugger memory starting at MYADDR. */
3473 linux_read_auxv (CORE_ADDR offset
, unsigned char *myaddr
, unsigned int len
)
3475 char filename
[PATH_MAX
];
3477 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
3479 snprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
3481 fd
= open (filename
, O_RDONLY
);
3485 if (offset
!= (CORE_ADDR
) 0
3486 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
3489 n
= read (fd
, myaddr
, len
);
3496 /* These breakpoint and watchpoint related wrapper functions simply
3497 pass on the function call if the target has registered a
3498 corresponding function. */
3501 linux_insert_point (char type
, CORE_ADDR addr
, int len
)
3503 if (the_low_target
.insert_point
!= NULL
)
3504 return the_low_target
.insert_point (type
, addr
, len
);
3506 /* Unsupported (see target.h). */
3511 linux_remove_point (char type
, CORE_ADDR addr
, int len
)
3513 if (the_low_target
.remove_point
!= NULL
)
3514 return the_low_target
.remove_point (type
, addr
, len
);
3516 /* Unsupported (see target.h). */
3521 linux_stopped_by_watchpoint (void)
3523 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
3525 return lwp
->stopped_by_watchpoint
;
3529 linux_stopped_data_address (void)
3531 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
3533 return lwp
->stopped_data_address
;
3536 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
3537 #if defined(__mcoldfire__)
3538 /* These should really be defined in the kernel's ptrace.h header. */
3539 #define PT_TEXT_ADDR 49*4
3540 #define PT_DATA_ADDR 50*4
3541 #define PT_TEXT_END_ADDR 51*4
3544 /* Under uClinux, programs are loaded at non-zero offsets, which we need
3545 to tell gdb about. */
3548 linux_read_offsets (CORE_ADDR
*text_p
, CORE_ADDR
*data_p
)
3550 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
3551 unsigned long text
, text_end
, data
;
3552 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
3556 text
= ptrace (PTRACE_PEEKUSER
, pid
, (long)PT_TEXT_ADDR
, 0);
3557 text_end
= ptrace (PTRACE_PEEKUSER
, pid
, (long)PT_TEXT_END_ADDR
, 0);
3558 data
= ptrace (PTRACE_PEEKUSER
, pid
, (long)PT_DATA_ADDR
, 0);
3562 /* Both text and data offsets produced at compile-time (and so
3563 used by gdb) are relative to the beginning of the program,
3564 with the data segment immediately following the text segment.
3565 However, the actual runtime layout in memory may put the data
3566 somewhere else, so when we send gdb a data base-address, we
3567 use the real data base address and subtract the compile-time
3568 data base-address from it (which is just the length of the
3569 text segment). BSS immediately follows data in both
3572 *data_p
= data
- (text_end
- text
);
3582 compare_ints (const void *xa
, const void *xb
)
3584 int a
= *(const int *)xa
;
3585 int b
= *(const int *)xb
;
3591 unique (int *b
, int *e
)
3600 /* Given PID, iterates over all threads in that process.
3602 Information about each thread, in a format suitable for qXfer:osdata:thread
3603 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
3604 initialized, and the caller is responsible for finishing and appending '\0'
3607 The list of cores that threads are running on is assigned to *CORES, if it
3608 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
3609 should free *CORES. */
3612 list_threads (int pid
, struct buffer
*buffer
, char **cores
)
3616 int *core_numbers
= xmalloc (sizeof (int) * allocated
);
3620 struct stat statbuf
;
3622 sprintf (pathname
, "/proc/%d/task", pid
);
3623 if (stat (pathname
, &statbuf
) == 0 && S_ISDIR (statbuf
.st_mode
))
3625 dir
= opendir (pathname
);
3628 free (core_numbers
);
3632 while ((dp
= readdir (dir
)) != NULL
)
3634 unsigned long lwp
= strtoul (dp
->d_name
, NULL
, 10);
3638 unsigned core
= linux_core_of_thread (ptid_build (pid
, lwp
, 0));
3642 char s
[sizeof ("4294967295")];
3643 sprintf (s
, "%u", core
);
3645 if (count
== allocated
)
3648 core_numbers
= realloc (core_numbers
,
3649 sizeof (int) * allocated
);
3651 core_numbers
[count
++] = core
;
3653 buffer_xml_printf (buffer
,
3655 "<column name=\"pid\">%d</column>"
3656 "<column name=\"tid\">%s</column>"
3657 "<column name=\"core\">%s</column>"
3658 "</item>", pid
, dp
->d_name
, s
);
3663 buffer_xml_printf (buffer
,
3665 "<column name=\"pid\">%d</column>"
3666 "<column name=\"tid\">%s</column>"
3667 "</item>", pid
, dp
->d_name
);
3678 struct buffer buffer2
;
3681 qsort (core_numbers
, count
, sizeof (int), compare_ints
);
3683 /* Remove duplicates. */
3685 e
= unique (b
, core_numbers
+ count
);
3687 buffer_init (&buffer2
);
3689 for (b
= core_numbers
; b
!= e
; ++b
)
3691 char number
[sizeof ("4294967295")];
3692 sprintf (number
, "%u", *b
);
3693 buffer_xml_printf (&buffer2
, "%s%s",
3694 (b
== core_numbers
) ? "" : ",", number
);
3696 buffer_grow_str0 (&buffer2
, "");
3698 *cores
= buffer_finish (&buffer2
);
3701 free (core_numbers
);
3705 show_process (int pid
, const char *username
, struct buffer
*buffer
)
3709 char cmd
[MAXPATHLEN
+ 1];
3711 sprintf (pathname
, "/proc/%d/cmdline", pid
);
3713 if ((f
= fopen (pathname
, "r")) != NULL
)
3715 size_t len
= fread (cmd
, 1, sizeof (cmd
) - 1, f
);
3720 for (i
= 0; i
< len
; i
++)
3725 buffer_xml_printf (buffer
,
3727 "<column name=\"pid\">%d</column>"
3728 "<column name=\"user\">%s</column>"
3729 "<column name=\"command\">%s</column>",
3734 /* This only collects core numbers, and does not print threads. */
3735 list_threads (pid
, NULL
, &cores
);
3739 buffer_xml_printf (buffer
,
3740 "<column name=\"cores\">%s</column>", cores
);
3744 buffer_xml_printf (buffer
, "</item>");
3751 linux_qxfer_osdata (const char *annex
,
3752 unsigned char *readbuf
, unsigned const char *writebuf
,
3753 CORE_ADDR offset
, int len
)
3755 /* We make the process list snapshot when the object starts to be
3757 static const char *buf
;
3758 static long len_avail
= -1;
3759 static struct buffer buffer
;
3765 if (strcmp (annex
, "processes") == 0)
3767 else if (strcmp (annex
, "threads") == 0)
3772 if (!readbuf
|| writebuf
)
3777 if (len_avail
!= -1 && len_avail
!= 0)
3778 buffer_free (&buffer
);
3781 buffer_init (&buffer
);
3783 buffer_grow_str (&buffer
, "<osdata type=\"processes\">");
3785 buffer_grow_str (&buffer
, "<osdata type=\"threads\">");
3787 dirp
= opendir ("/proc");
3791 while ((dp
= readdir (dirp
)) != NULL
)
3793 struct stat statbuf
;
3794 char procentry
[sizeof ("/proc/4294967295")];
3796 if (!isdigit (dp
->d_name
[0])
3797 || strlen (dp
->d_name
) > sizeof ("4294967295") - 1)
3800 sprintf (procentry
, "/proc/%s", dp
->d_name
);
3801 if (stat (procentry
, &statbuf
) == 0
3802 && S_ISDIR (statbuf
.st_mode
))
3804 int pid
= (int) strtoul (dp
->d_name
, NULL
, 10);
3808 struct passwd
*entry
= getpwuid (statbuf
.st_uid
);
3809 show_process (pid
, entry
? entry
->pw_name
: "?", &buffer
);
3813 list_threads (pid
, &buffer
, NULL
);
3820 buffer_grow_str0 (&buffer
, "</osdata>\n");
3821 buf
= buffer_finish (&buffer
);
3822 len_avail
= strlen (buf
);
3825 if (offset
>= len_avail
)
3827 /* Done. Get rid of the data. */
3828 buffer_free (&buffer
);
3834 if (len
> len_avail
- offset
)
3835 len
= len_avail
- offset
;
3836 memcpy (readbuf
, buf
+ offset
, len
);
3841 /* Convert a native/host siginfo object, into/from the siginfo in the
3842 layout of the inferiors' architecture. */
3845 siginfo_fixup (struct siginfo
*siginfo
, void *inf_siginfo
, int direction
)
3849 if (the_low_target
.siginfo_fixup
!= NULL
)
3850 done
= the_low_target
.siginfo_fixup (siginfo
, inf_siginfo
, direction
);
3852 /* If there was no callback, or the callback didn't do anything,
3853 then just do a straight memcpy. */
3857 memcpy (siginfo
, inf_siginfo
, sizeof (struct siginfo
));
3859 memcpy (inf_siginfo
, siginfo
, sizeof (struct siginfo
));
3864 linux_xfer_siginfo (const char *annex
, unsigned char *readbuf
,
3865 unsigned const char *writebuf
, CORE_ADDR offset
, int len
)
3868 struct siginfo siginfo
;
3869 char inf_siginfo
[sizeof (struct siginfo
)];
3871 if (current_inferior
== NULL
)
3874 pid
= lwpid_of (get_thread_lwp (current_inferior
));
3877 fprintf (stderr
, "%s siginfo for lwp %d.\n",
3878 readbuf
!= NULL
? "Reading" : "Writing",
3881 if (offset
> sizeof (siginfo
))
3884 if (ptrace (PTRACE_GETSIGINFO
, pid
, 0, &siginfo
) != 0)
3887 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
3888 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3889 inferior with a 64-bit GDBSERVER should look the same as debugging it
3890 with a 32-bit GDBSERVER, we need to convert it. */
3891 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
3893 if (offset
+ len
> sizeof (siginfo
))
3894 len
= sizeof (siginfo
) - offset
;
3896 if (readbuf
!= NULL
)
3897 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
3900 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
3902 /* Convert back to ptrace layout before flushing it out. */
3903 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
3905 if (ptrace (PTRACE_SETSIGINFO
, pid
, 0, &siginfo
) != 0)
3912 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
3913 so we notice when children change state; as the handler for the
3914 sigsuspend in my_waitpid. */
3917 sigchld_handler (int signo
)
3919 int old_errno
= errno
;
3922 /* fprintf is not async-signal-safe, so call write directly. */
3923 write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
3925 if (target_is_async_p ())
3926 async_file_mark (); /* trigger a linux_wait */
/* Report to GDB that this target supports non-stop mode.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
3938 linux_async (int enable
)
3940 int previous
= (linux_event_pipe
[0] != -1);
3942 if (previous
!= enable
)
3945 sigemptyset (&mask
);
3946 sigaddset (&mask
, SIGCHLD
);
3948 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
3952 if (pipe (linux_event_pipe
) == -1)
3953 fatal ("creating event pipe failed.");
3955 fcntl (linux_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
3956 fcntl (linux_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
3958 /* Register the event loop handler. */
3959 add_file_handler (linux_event_pipe
[0],
3960 handle_target_event
, NULL
);
3962 /* Always trigger a linux_wait. */
3967 delete_file_handler (linux_event_pipe
[0]);
3969 close (linux_event_pipe
[0]);
3970 close (linux_event_pipe
[1]);
3971 linux_event_pipe
[0] = -1;
3972 linux_event_pipe
[1] = -1;
3975 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
/* Switch non-stop mode on (NONSTOP != 0) or off.  Returns 0 to report
   success to the caller.  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}
/* Report to GDB that this target supports debugging multiple
   processes simultaneously.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
3996 /* Enumerate spufs IDs for process PID. */
3998 spu_enumerate_spu_ids (long pid
, unsigned char *buf
, CORE_ADDR offset
, int len
)
4004 struct dirent
*entry
;
4006 sprintf (path
, "/proc/%ld/fd", pid
);
4007 dir
= opendir (path
);
4012 while ((entry
= readdir (dir
)) != NULL
)
4018 fd
= atoi (entry
->d_name
);
4022 sprintf (path
, "/proc/%ld/fd/%d", pid
, fd
);
4023 if (stat (path
, &st
) != 0)
4025 if (!S_ISDIR (st
.st_mode
))
4028 if (statfs (path
, &stfs
) != 0)
4030 if (stfs
.f_type
!= SPUFS_MAGIC
)
4033 if (pos
>= offset
&& pos
+ 4 <= offset
+ len
)
4035 *(unsigned int *)(buf
+ pos
- offset
) = fd
;
4045 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4046 object type, using the /proc file system. */
4048 linux_qxfer_spu (const char *annex
, unsigned char *readbuf
,
4049 unsigned const char *writebuf
,
4050 CORE_ADDR offset
, int len
)
4052 long pid
= lwpid_of (get_thread_lwp (current_inferior
));
4057 if (!writebuf
&& !readbuf
)
4065 return spu_enumerate_spu_ids (pid
, readbuf
, offset
, len
);
4068 sprintf (buf
, "/proc/%ld/fd/%s", pid
, annex
);
4069 fd
= open (buf
, writebuf
? O_WRONLY
: O_RDONLY
);
4074 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
4081 ret
= write (fd
, writebuf
, (size_t) len
);
4083 ret
= read (fd
, readbuf
, (size_t) len
);
4090 linux_core_of_thread (ptid_t ptid
)
4092 char filename
[sizeof ("/proc//task//stat")
4093 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
4096 char *content
= NULL
;
4099 int content_read
= 0;
4103 sprintf (filename
, "/proc/%d/task/%ld/stat",
4104 ptid_get_pid (ptid
), ptid_get_lwp (ptid
));
4105 f
= fopen (filename
, "r");
4112 content
= realloc (content
, content_read
+ 1024);
4113 n
= fread (content
+ content_read
, 1, 1024, f
);
4117 content
[content_read
] = '\0';
4122 p
= strchr (content
, '(');
4123 p
= strchr (p
, ')') + 2; /* skip ")" and a whitespace. */
4125 p
= strtok_r (p
, " ", &ts
);
4126 for (i
= 0; i
!= 36; ++i
)
4127 p
= strtok_r (NULL
, " ", &ts
);
4129 if (sscanf (p
, "%d", &core
) == 0)
/* The vector of Linux implementations of the target operations,
   installed by initialize_low via set_target_ops.  Entries must appear
   in exactly the order declared by struct target_ops in target.h.
   NOTE(review): this initializer appears truncated by extraction —
   several entries visible in the declaration order (attach/kill/
   detach/resume/wait, memory read/write, qxfer callbacks, etc.) are
   missing here; restore against the struct target_ops declaration
   before building.  */
4138 static struct target_ops linux_target_ops
= {
4139 linux_create_inferior
,
4147 linux_fetch_registers
,
4148 linux_store_registers
,
4151 linux_look_up_symbols
,
4152 linux_request_interrupt
,
4156 linux_stopped_by_watchpoint
,
4157 linux_stopped_data_address
,
/* Targets without an MMU report loaded-segment offsets; others pass
   NULL in the #else branch (dropped from this view).  */
4158 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4163 #ifdef USE_THREAD_DB
4164 thread_db_get_tls_address
,
4169 hostio_last_error_from_errno
,
4172 linux_supports_non_stop
,
4174 linux_start_non_stop
,
4175 linux_supports_multi_process
,
4176 #ifdef USE_THREAD_DB
4177 thread_db_handle_monitor_command
,
4181 linux_core_of_thread
/* Ignore the thread-library cancellation signal so it is delivered to
   the inferior instead of killing gdbserver.  */

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}
4195 initialize_low (void)
4197 struct sigaction sigchld_action
;
4198 memset (&sigchld_action
, 0, sizeof (sigchld_action
));
4199 set_target_ops (&linux_target_ops
);
4200 set_breakpoint_data (the_low_target
.breakpoint
,
4201 the_low_target
.breakpoint_len
);
4202 linux_init_signals ();
4203 linux_test_for_tracefork ();
4204 #ifdef HAVE_LINUX_REGSETS
4205 for (num_regsets
= 0; target_regsets
[num_regsets
].size
>= 0; num_regsets
++)
4207 disabled_regsets
= xmalloc (num_regsets
);
4210 sigchld_action
.sa_handler
= sigchld_handler
;
4211 sigemptyset (&sigchld_action
.sa_mask
);
4212 sigchld_action
.sa_flags
= SA_RESTART
;
4213 sigaction (SIGCHLD
, &sigchld_action
, NULL
);