1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2013 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "linux-low.h"
21 #include "linux-osdata.h"
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
28 #include <sys/ptrace.h>
29 #include "linux-ptrace.h"
30 #include "linux-procfs.h"
32 #include <sys/ioctl.h>
38 #include <sys/syscall.h>
42 #include <sys/types.h>
48 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
49 then ELFMAG0 will have been defined. If it didn't get included by
50 gdb_proc_service.h then including it will likely introduce a duplicate
51 definition of elf_fpregset_t. */
56 #define SPUFS_MAGIC 0x23c9b64e
59 #ifdef HAVE_PERSONALITY
60 # include <sys/personality.h>
61 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
62 # define ADDR_NO_RANDOMIZE 0x0040000
71 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
74 /* This is the kernel's hard limit. Not to be confused with
80 /* Some targets did not define these ptrace constants from the start,
81 so gdbserver defines them locally here. In the future, these may
82 be removed after they are added to asm/ptrace.h. */
83 #if !(defined(PT_TEXT_ADDR) \
84 || defined(PT_DATA_ADDR) \
85 || defined(PT_TEXT_END_ADDR))
86 #if defined(__mcoldfire__)
87 /* These are still undefined in 3.10 kernels. */
88 #define PT_TEXT_ADDR 49*4
89 #define PT_DATA_ADDR 50*4
90 #define PT_TEXT_END_ADDR 51*4
91 /* BFIN already defines these since at least 2.6.32 kernels. */
93 #define PT_TEXT_ADDR 220
94 #define PT_TEXT_END_ADDR 224
95 #define PT_DATA_ADDR 228
96 /* These are still undefined in 3.10 kernels. */
97 #elif defined(__TMS320C6X__)
98 #define PT_TEXT_ADDR (0x10000*4)
99 #define PT_DATA_ADDR (0x10004*4)
100 #define PT_TEXT_END_ADDR (0x10008*4)
104 #ifdef HAVE_LINUX_BTRACE
105 # include "linux-btrace.h"
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We use to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif
#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We use to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
138 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
139 representation of the thread ID.
141 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
142 the same as the LWP ID.
144 ``all_processes'' is keyed by the "overall process ID", which
145 GNU/Linux calls tgid, "thread group ID". */
147 struct inferior_list all_lwps
;
149 /* A list of all unknown processes which receive stop signals. Some
150 other process will presumably claim each of these as forked
151 children momentarily. */
153 struct simple_pid_list
155 /* The process ID. */
158 /* The status as reported by waitpid. */
162 struct simple_pid_list
*next
;
164 struct simple_pid_list
*stopped_pids
;
166 /* Trivial list manipulation functions to keep track of a list of new
167 stopped processes. */
170 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
172 struct simple_pid_list
*new_pid
= xmalloc (sizeof (struct simple_pid_list
));
175 new_pid
->status
= status
;
176 new_pid
->next
= *listp
;
181 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *statusp
)
183 struct simple_pid_list
**p
;
185 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
186 if ((*p
)->pid
== pid
)
188 struct simple_pid_list
*next
= (*p
)->next
;
190 *statusp
= (*p
)->status
;
/* The kinds of all-stop operations we may be performing on the
   inferior's threads.  */

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;
220 static void linux_resume_one_lwp (struct lwp_info
*lwp
,
221 int step
, int signal
, siginfo_t
*info
);
222 static void linux_resume (struct thread_resume
*resume_info
, size_t n
);
223 static void stop_all_lwps (int suspend
, struct lwp_info
*except
);
224 static void unstop_all_lwps (int unsuspend
, struct lwp_info
*except
);
225 static int linux_wait_for_event (ptid_t ptid
, int *wstat
, int options
);
226 static void *add_lwp (ptid_t ptid
);
227 static int linux_stopped_by_watchpoint (void);
228 static void mark_lwp_dead (struct lwp_info
*lwp
, int wstat
);
229 static void proceed_all_lwps (void);
230 static int finish_step_over (struct lwp_info
*lwp
);
231 static CORE_ADDR
get_stop_pc (struct lwp_info
*lwp
);
232 static int kill_lwp (unsigned long lwpid
, int signo
);
234 /* True if the low target can hardware single-step. Such targets
235 don't need a BREAKPOINT_REINSERT_ADDR callback. */
238 can_hardware_single_step (void)
240 return (the_low_target
.breakpoint_reinsert_addr
== NULL
);
243 /* True if the low target supports memory breakpoints. If so, we'll
244 have a GET_PC implementation. */
247 supports_breakpoints (void)
249 return (the_low_target
.get_pc
!= NULL
);
252 /* Returns true if this target can support fast tracepoints. This
253 does not mean that the in-process agent has been loaded in the
257 supports_fast_tracepoints (void)
259 return the_low_target
.install_fast_tracepoint_jump_pad
!= NULL
;
262 /* True if LWP is stopped in its stepping range. */
265 lwp_in_step_range (struct lwp_info
*lwp
)
267 CORE_ADDR pc
= lwp
->stop_pc
;
269 return (pc
>= lwp
->step_range_start
&& pc
< lwp
->step_range_end
);
272 struct pending_signals
276 struct pending_signals
*prev
;
279 /* The read/write ends of the pipe registered as waitable file in the
281 static int linux_event_pipe
[2] = { -1, -1 };
283 /* True if we're currently in async mode. */
284 #define target_is_async_p() (linux_event_pipe[0] != -1)
286 static void send_sigstop (struct lwp_info
*lwp
);
287 static void wait_for_sigstop (struct inferior_list_entry
*entry
);
/* Return non-zero if HEADER is a 64-bit ELF file.  Store the ELF
   machine number in *MACHINE when the magic bytes match, EM_NONE
   otherwise.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }

  *machine = EM_NONE;
  return 0;
}
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return -1;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}
331 /* Accepts an integer PID; Returns true if the executable PID is
332 running is a 64-bit ELF file.. */
335 linux_pid_exe_is_elf_64_file (int pid
, unsigned int *machine
)
339 sprintf (file
, "/proc/%d/exe", pid
);
340 return elf_64_file_p (file
, machine
);
344 delete_lwp (struct lwp_info
*lwp
)
346 remove_thread (get_lwp_thread (lwp
));
347 remove_inferior (&all_lwps
, &lwp
->head
);
348 free (lwp
->arch_private
);
352 /* Add a process to the common process list, and set its private
355 static struct process_info
*
356 linux_add_process (int pid
, int attached
)
358 struct process_info
*proc
;
360 proc
= add_process (pid
, attached
);
361 proc
->private = xcalloc (1, sizeof (*proc
->private));
363 /* Set the arch when the first LWP stops. */
364 proc
->private->new_inferior
= 1;
366 if (the_low_target
.new_process
!= NULL
)
367 proc
->private->arch_private
= the_low_target
.new_process ();
372 /* Handle a GNU/Linux extended wait response. If we see a clone
373 event, we need to add the new LWP to our list (and not report the
374 trap to higher layers). */
377 handle_extended_wait (struct lwp_info
*event_child
, int wstat
)
379 int event
= wstat
>> 16;
380 struct lwp_info
*new_lwp
;
382 if (event
== PTRACE_EVENT_CLONE
)
385 unsigned long new_pid
;
388 ptrace (PTRACE_GETEVENTMSG
, lwpid_of (event_child
), (PTRACE_TYPE_ARG3
) 0,
391 /* If we haven't already seen the new PID stop, wait for it now. */
392 if (!pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
394 /* The new child has a pending SIGSTOP. We can't affect it until it
395 hits the SIGSTOP, but we're already attached. */
397 ret
= my_waitpid (new_pid
, &status
, __WALL
);
400 perror_with_name ("waiting for new child");
401 else if (ret
!= new_pid
)
402 warning ("wait returned unexpected PID %d", ret
);
403 else if (!WIFSTOPPED (status
))
404 warning ("wait returned unexpected status 0x%x", status
);
407 ptid
= ptid_build (pid_of (event_child
), new_pid
, 0);
408 new_lwp
= (struct lwp_info
*) add_lwp (ptid
);
409 add_thread (ptid
, new_lwp
);
411 /* Either we're going to immediately resume the new thread
412 or leave it stopped. linux_resume_one_lwp is a nop if it
413 thinks the thread is currently running, so set this first
414 before calling linux_resume_one_lwp. */
415 new_lwp
->stopped
= 1;
417 /* If we're suspending all threads, leave this one suspended
419 if (stopping_threads
== STOPPING_AND_SUSPENDING_THREADS
)
420 new_lwp
->suspended
= 1;
422 /* Normally we will get the pending SIGSTOP. But in some cases
423 we might get another signal delivered to the group first.
424 If we do get another signal, be sure not to lose it. */
425 if (WSTOPSIG (status
) == SIGSTOP
)
427 if (stopping_threads
!= NOT_STOPPING_THREADS
)
428 new_lwp
->stop_pc
= get_stop_pc (new_lwp
);
430 linux_resume_one_lwp (new_lwp
, 0, 0, NULL
);
434 new_lwp
->stop_expected
= 1;
436 if (stopping_threads
!= NOT_STOPPING_THREADS
)
438 new_lwp
->stop_pc
= get_stop_pc (new_lwp
);
439 new_lwp
->status_pending_p
= 1;
440 new_lwp
->status_pending
= status
;
443 /* Pass the signal on. This is what GDB does - except
444 shouldn't we really report it instead? */
445 linux_resume_one_lwp (new_lwp
, 0, WSTOPSIG (status
), NULL
);
448 /* Always resume the current thread. If we are stopping
449 threads, it will have a pending SIGSTOP; we may as well
451 linux_resume_one_lwp (event_child
, event_child
->stepping
, 0, NULL
);
455 /* Return the PC as read from the regcache of LWP, without any
459 get_pc (struct lwp_info
*lwp
)
461 struct thread_info
*saved_inferior
;
462 struct regcache
*regcache
;
465 if (the_low_target
.get_pc
== NULL
)
468 saved_inferior
= current_inferior
;
469 current_inferior
= get_lwp_thread (lwp
);
471 regcache
= get_thread_regcache (current_inferior
, 1);
472 pc
= (*the_low_target
.get_pc
) (regcache
);
475 fprintf (stderr
, "pc is 0x%lx\n", (long) pc
);
477 current_inferior
= saved_inferior
;
481 /* This function should only be called if LWP got a SIGTRAP.
482 The SIGTRAP could mean several things.
484 On i386, where decr_pc_after_break is non-zero:
485 If we were single-stepping this process using PTRACE_SINGLESTEP,
486 we will get only the one SIGTRAP (even if the instruction we
487 stepped over was a breakpoint). The value of $eip will be the
489 If we continue the process using PTRACE_CONT, we will get a
490 SIGTRAP when we hit a breakpoint. The value of $eip will be
491 the instruction after the breakpoint (i.e. needs to be
492 decremented). If we report the SIGTRAP to GDB, we must also
493 report the undecremented PC. If we cancel the SIGTRAP, we
494 must resume at the decremented PC.
496 (Presumably, not yet tested) On a non-decr_pc_after_break machine
497 with hardware or kernel single-step:
498 If we single-step over a breakpoint instruction, our PC will
499 point at the following instruction. If we continue and hit a
500 breakpoint instruction, our PC will point at the breakpoint
504 get_stop_pc (struct lwp_info
*lwp
)
508 if (the_low_target
.get_pc
== NULL
)
511 stop_pc
= get_pc (lwp
);
513 if (WSTOPSIG (lwp
->last_status
) == SIGTRAP
515 && !lwp
->stopped_by_watchpoint
516 && lwp
->last_status
>> 16 == 0)
517 stop_pc
-= the_low_target
.decr_pc_after_break
;
520 fprintf (stderr
, "stop pc is 0x%lx\n", (long) stop_pc
);
526 add_lwp (ptid_t ptid
)
528 struct lwp_info
*lwp
;
530 lwp
= (struct lwp_info
*) xmalloc (sizeof (*lwp
));
531 memset (lwp
, 0, sizeof (*lwp
));
535 if (the_low_target
.new_thread
!= NULL
)
536 lwp
->arch_private
= the_low_target
.new_thread ();
538 add_inferior_to_list (&all_lwps
, &lwp
->head
);
543 /* Start an inferior process and returns its pid.
544 ALLARGS is a vector of program-name and args. */
547 linux_create_inferior (char *program
, char **allargs
)
549 #ifdef HAVE_PERSONALITY
550 int personality_orig
= 0, personality_set
= 0;
552 struct lwp_info
*new_lwp
;
556 #ifdef HAVE_PERSONALITY
557 if (disable_randomization
)
560 personality_orig
= personality (0xffffffff);
561 if (errno
== 0 && !(personality_orig
& ADDR_NO_RANDOMIZE
))
564 personality (personality_orig
| ADDR_NO_RANDOMIZE
);
566 if (errno
!= 0 || (personality_set
567 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE
)))
568 warning ("Error disabling address space randomization: %s",
573 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
579 perror_with_name ("fork");
583 ptrace (PTRACE_TRACEME
, 0, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
585 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
586 signal (__SIGRTMIN
+ 1, SIG_DFL
);
591 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
592 stdout to stderr so that inferior i/o doesn't corrupt the connection.
593 Also, redirect stdin to /dev/null. */
594 if (remote_connection_is_stdio ())
597 open ("/dev/null", O_RDONLY
);
599 if (write (2, "stdin/stdout redirected\n",
600 sizeof ("stdin/stdout redirected\n") - 1) < 0)
602 /* Errors ignored. */;
606 execv (program
, allargs
);
608 execvp (program
, allargs
);
610 fprintf (stderr
, "Cannot exec %s: %s.\n", program
,
616 #ifdef HAVE_PERSONALITY
620 personality (personality_orig
);
622 warning ("Error restoring address space randomization: %s",
627 linux_add_process (pid
, 0);
629 ptid
= ptid_build (pid
, pid
, 0);
630 new_lwp
= add_lwp (ptid
);
631 add_thread (ptid
, new_lwp
);
632 new_lwp
->must_set_ptrace_flags
= 1;
637 /* Attach to an inferior process. */
640 linux_attach_lwp_1 (unsigned long lwpid
, int initial
)
643 struct lwp_info
*new_lwp
;
645 if (ptrace (PTRACE_ATTACH
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0)
648 struct buffer buffer
;
652 /* If we fail to attach to an LWP, just warn. */
653 fprintf (stderr
, "Cannot attach to lwp %ld: %s (%d)\n", lwpid
,
654 strerror (errno
), errno
);
659 /* If we fail to attach to a process, report an error. */
660 buffer_init (&buffer
);
661 linux_ptrace_attach_warnings (lwpid
, &buffer
);
662 buffer_grow_str0 (&buffer
, "");
663 error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer
),
664 lwpid
, strerror (errno
), errno
);
668 /* If lwp is the tgid, we handle adding existing threads later.
669 Otherwise we just add lwp without bothering about any other
671 ptid
= ptid_build (lwpid
, lwpid
, 0);
674 /* Note that extracting the pid from the current inferior is
675 safe, since we're always called in the context of the same
676 process as this new thread. */
677 int pid
= pid_of (get_thread_lwp (current_inferior
));
678 ptid
= ptid_build (pid
, lwpid
, 0);
681 new_lwp
= (struct lwp_info
*) add_lwp (ptid
);
682 add_thread (ptid
, new_lwp
);
684 /* We need to wait for SIGSTOP before being able to make the next
685 ptrace call on this LWP. */
686 new_lwp
->must_set_ptrace_flags
= 1;
688 if (linux_proc_pid_is_stopped (lwpid
))
692 "Attached to a stopped process\n");
694 /* The process is definitely stopped. It is in a job control
695 stop, unless the kernel predates the TASK_STOPPED /
696 TASK_TRACED distinction, in which case it might be in a
697 ptrace stop. Make sure it is in a ptrace stop; from there we
698 can kill it, signal it, et cetera.
700 First make sure there is a pending SIGSTOP. Since we are
701 already attached, the process can not transition from stopped
702 to running without a PTRACE_CONT; so we know this signal will
703 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
704 probably already in the queue (unless this kernel is old
705 enough to use TASK_STOPPED for ptrace stops); but since
706 SIGSTOP is not an RT signal, it can only be queued once. */
707 kill_lwp (lwpid
, SIGSTOP
);
709 /* Finally, resume the stopped process. This will deliver the
710 SIGSTOP (or a higher priority signal, just like normal
711 PTRACE_ATTACH), which we'll catch later on. */
712 ptrace (PTRACE_CONT
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
715 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
718 There are several cases to consider here:
720 1) gdbserver has already attached to the process and is being notified
721 of a new thread that is being created.
722 In this case we should ignore that SIGSTOP and resume the
723 process. This is handled below by setting stop_expected = 1,
724 and the fact that add_thread sets last_resume_kind ==
727 2) This is the first thread (the process thread), and we're attaching
728 to it via attach_inferior.
729 In this case we want the process thread to stop.
730 This is handled by having linux_attach set last_resume_kind ==
731 resume_stop after we return.
733 If the pid we are attaching to is also the tgid, we attach to and
734 stop all the existing threads. Otherwise, we attach to pid and
735 ignore any other threads in the same group as this pid.
737 3) GDB is connecting to gdbserver and is requesting an enumeration of all
739 In this case we want the thread to stop.
740 FIXME: This case is currently not properly handled.
741 We should wait for the SIGSTOP but don't. Things work apparently
742 because enough time passes between when we ptrace (ATTACH) and when
743 gdb makes the next ptrace call on the thread.
745 On the other hand, if we are currently trying to stop all threads, we
746 should treat the new thread as if we had sent it a SIGSTOP. This works
747 because we are guaranteed that the add_lwp call above added us to the
748 end of the list, and so the new thread has not yet reached
749 wait_for_sigstop (but will). */
750 new_lwp
->stop_expected
= 1;
/* Public wrapper: attach to an additional (non-initial) LWP.  */

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
759 /* Attach to PID. If PID is the tgid, attach to it and all
763 linux_attach (unsigned long pid
)
765 /* Attach to PID. We will check for other threads
767 linux_attach_lwp_1 (pid
, 1);
768 linux_add_process (pid
, 1);
772 struct thread_info
*thread
;
774 /* Don't ignore the initial SIGSTOP if we just attached to this
775 process. It will be collected by wait shortly. */
776 thread
= find_thread_ptid (ptid_build (pid
, pid
, 0));
777 thread
->last_resume_kind
= resume_stop
;
780 if (linux_proc_get_tgid (pid
) == pid
)
785 sprintf (pathname
, "/proc/%ld/task", pid
);
787 dir
= opendir (pathname
);
791 fprintf (stderr
, "Could not open /proc/%ld/task.\n", pid
);
796 /* At this point we attached to the tgid. Scan the task for
799 int new_threads_found
;
803 while (iterations
< 2)
805 new_threads_found
= 0;
806 /* Add all the other threads. While we go through the
807 threads, new threads may be spawned. Cycle through
808 the list of threads until we have done two iterations without
809 finding new threads. */
810 while ((dp
= readdir (dir
)) != NULL
)
813 lwp
= strtoul (dp
->d_name
, NULL
, 10);
815 /* Is this a new thread? */
817 && find_thread_ptid (ptid_build (pid
, lwp
, 0)) == NULL
)
819 linux_attach_lwp_1 (lwp
, 0);
824 Found and attached to new lwp %ld\n", lwp
);
828 if (!new_threads_found
)
849 second_thread_of_pid_p (struct inferior_list_entry
*entry
, void *args
)
851 struct counter
*counter
= args
;
853 if (ptid_get_pid (entry
->id
) == counter
->pid
)
855 if (++counter
->count
> 1)
863 last_thread_of_process_p (struct thread_info
*thread
)
865 ptid_t ptid
= ((struct inferior_list_entry
*)thread
)->id
;
866 int pid
= ptid_get_pid (ptid
);
867 struct counter counter
= { pid
, 0 };
869 return (find_inferior (&all_threads
,
870 second_thread_of_pid_p
, &counter
) == NULL
);
876 linux_kill_one_lwp (struct lwp_info
*lwp
)
878 int pid
= lwpid_of (lwp
);
880 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
881 there is no signal context, and ptrace(PTRACE_KILL) (or
882 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
883 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
884 alternative is to kill with SIGKILL. We only need one SIGKILL
885 per process, not one for each thread. But since we still support
886 linuxthreads, and we also support debugging programs using raw
887 clone without CLONE_THREAD, we send one for each thread. For
888 years, we used PTRACE_KILL only, so we're being a bit paranoid
889 about some old kernels where PTRACE_KILL might work better
890 (dubious if there are any such, but that's why it's paranoia), so
891 we try SIGKILL first, PTRACE_KILL second, and so we're fine
898 "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
899 target_pid_to_str (ptid_of (lwp
)),
900 errno
? strerror (errno
) : "OK");
903 ptrace (PTRACE_KILL
, pid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
906 "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
907 target_pid_to_str (ptid_of (lwp
)),
908 errno
? strerror (errno
) : "OK");
911 /* Callback for `find_inferior'. Kills an lwp of a given process,
912 except the leader. */
915 kill_one_lwp_callback (struct inferior_list_entry
*entry
, void *args
)
917 struct thread_info
*thread
= (struct thread_info
*) entry
;
918 struct lwp_info
*lwp
= get_thread_lwp (thread
);
920 int pid
= * (int *) args
;
922 if (ptid_get_pid (entry
->id
) != pid
)
925 /* We avoid killing the first thread here, because of a Linux kernel (at
926 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
927 the children get a chance to be reaped, it will remain a zombie
930 if (lwpid_of (lwp
) == pid
)
933 fprintf (stderr
, "lkop: is last of process %s\n",
934 target_pid_to_str (entry
->id
));
940 linux_kill_one_lwp (lwp
);
942 /* Make sure it died. The loop is most likely unnecessary. */
943 pid
= linux_wait_for_event (lwp
->head
.id
, &wstat
, __WALL
);
944 } while (pid
> 0 && WIFSTOPPED (wstat
));
952 struct process_info
*process
;
953 struct lwp_info
*lwp
;
957 process
= find_process_pid (pid
);
961 /* If we're killing a running inferior, make sure it is stopped
962 first, as PTRACE_KILL will not work otherwise. */
963 stop_all_lwps (0, NULL
);
965 find_inferior (&all_threads
, kill_one_lwp_callback
, &pid
);
967 /* See the comment in linux_kill_one_lwp. We did not kill the first
968 thread in the list, so do so now. */
969 lwp
= find_lwp_pid (pid_to_ptid (pid
));
974 fprintf (stderr
, "lk_1: cannot find lwp %ld, for pid: %d\n",
975 lwpid_of (lwp
), pid
);
980 fprintf (stderr
, "lk_1: killing lwp %ld, for pid: %d\n",
981 lwpid_of (lwp
), pid
);
985 linux_kill_one_lwp (lwp
);
987 /* Make sure it died. The loop is most likely unnecessary. */
988 lwpid
= linux_wait_for_event (lwp
->head
.id
, &wstat
, __WALL
);
989 } while (lwpid
> 0 && WIFSTOPPED (wstat
));
992 the_target
->mourn (process
);
994 /* Since we presently can only stop all lwps of all processes, we
995 need to unstop lwps of other processes. */
996 unstop_all_lwps (0, NULL
);
1000 /* Get pending signal of THREAD, for detaching purposes. This is the
1001 signal the thread last stopped for, which we need to deliver to the
1002 thread when detaching, otherwise, it'd be suppressed/lost. */
1005 get_detach_signal (struct thread_info
*thread
)
1007 enum gdb_signal signo
= GDB_SIGNAL_0
;
1009 struct lwp_info
*lp
= get_thread_lwp (thread
);
1011 if (lp
->status_pending_p
)
1012 status
= lp
->status_pending
;
1015 /* If the thread had been suspended by gdbserver, and it stopped
1016 cleanly, then it'll have stopped with SIGSTOP. But we don't
1017 want to deliver that SIGSTOP. */
1018 if (thread
->last_status
.kind
!= TARGET_WAITKIND_STOPPED
1019 || thread
->last_status
.value
.sig
== GDB_SIGNAL_0
)
1022 /* Otherwise, we may need to deliver the signal we
1024 status
= lp
->last_status
;
1027 if (!WIFSTOPPED (status
))
1031 "GPS: lwp %s hasn't stopped: no pending signal\n",
1032 target_pid_to_str (ptid_of (lp
)));
1036 /* Extended wait statuses aren't real SIGTRAPs. */
1037 if (WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 != 0)
1041 "GPS: lwp %s had stopped with extended "
1042 "status: no pending signal\n",
1043 target_pid_to_str (ptid_of (lp
)));
1047 signo
= gdb_signal_from_host (WSTOPSIG (status
));
1049 if (program_signals_p
&& !program_signals
[signo
])
1053 "GPS: lwp %s had signal %s, but it is in nopass state\n",
1054 target_pid_to_str (ptid_of (lp
)),
1055 gdb_signal_to_string (signo
));
1058 else if (!program_signals_p
1059 /* If we have no way to know which signals GDB does not
1060 want to have passed to the program, assume
1061 SIGTRAP/SIGINT, which is GDB's default. */
1062 && (signo
== GDB_SIGNAL_TRAP
|| signo
== GDB_SIGNAL_INT
))
1066 "GPS: lwp %s had signal %s, "
1067 "but we don't know if we should pass it. Default to not.\n",
1068 target_pid_to_str (ptid_of (lp
)),
1069 gdb_signal_to_string (signo
));
1076 "GPS: lwp %s has pending signal %s: delivering it.\n",
1077 target_pid_to_str (ptid_of (lp
)),
1078 gdb_signal_to_string (signo
));
1080 return WSTOPSIG (status
);
1085 linux_detach_one_lwp (struct inferior_list_entry
*entry
, void *args
)
1087 struct thread_info
*thread
= (struct thread_info
*) entry
;
1088 struct lwp_info
*lwp
= get_thread_lwp (thread
);
1089 int pid
= * (int *) args
;
1092 if (ptid_get_pid (entry
->id
) != pid
)
1095 /* If there is a pending SIGSTOP, get rid of it. */
1096 if (lwp
->stop_expected
)
1100 "Sending SIGCONT to %s\n",
1101 target_pid_to_str (ptid_of (lwp
)));
1103 kill_lwp (lwpid_of (lwp
), SIGCONT
);
1104 lwp
->stop_expected
= 0;
1107 /* Flush any pending changes to the process's registers. */
1108 regcache_invalidate_thread (get_lwp_thread (lwp
));
1110 /* Pass on any pending signal for this thread. */
1111 sig
= get_detach_signal (thread
);
1113 /* Finally, let it resume. */
1114 if (the_low_target
.prepare_to_resume
!= NULL
)
1115 the_low_target
.prepare_to_resume (lwp
);
1116 if (ptrace (PTRACE_DETACH
, lwpid_of (lwp
), (PTRACE_TYPE_ARG3
) 0,
1117 (PTRACE_TYPE_ARG4
) (long) sig
) < 0)
1118 error (_("Can't detach %s: %s"),
1119 target_pid_to_str (ptid_of (lwp
)),
1127 linux_detach (int pid
)
1129 struct process_info
*process
;
1131 process
= find_process_pid (pid
);
1132 if (process
== NULL
)
1135 /* Stop all threads before detaching. First, ptrace requires that
1136 the thread is stopped to sucessfully detach. Second, thread_db
1137 may need to uninstall thread event breakpoints from memory, which
1138 only works with a stopped process anyway. */
1139 stop_all_lwps (0, NULL
);
1141 #ifdef USE_THREAD_DB
1142 thread_db_detach (process
);
1145 /* Stabilize threads (move out of jump pads). */
1146 stabilize_threads ();
1148 find_inferior (&all_threads
, linux_detach_one_lwp
, &pid
);
1150 the_target
->mourn (process
);
1152 /* Since we presently can only stop all lwps of all processes, we
1153 need to unstop lwps of other processes. */
1154 unstop_all_lwps (0, NULL
);
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}
1173 linux_mourn (struct process_info
*process
)
1175 struct process_info_private
*priv
;
1177 #ifdef USE_THREAD_DB
1178 thread_db_mourn (process
);
1181 find_inferior (&all_lwps
, delete_lwp_callback
, process
);
1183 /* Freeing all private data. */
1184 priv
= process
->private;
1185 free (priv
->arch_private
);
1187 process
->private = NULL
;
1189 remove_process (process
);
/* Wait until process PID has fully exited (or until waitpid reports
   there are no more children to wait for).  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}
1204 /* Return nonzero if the given thread is still alive. */
1206 linux_thread_alive (ptid_t ptid
)
1208 struct lwp_info
*lwp
= find_lwp_pid (ptid
);
1210 /* We assume we always know if a thread exits. If a whole process
1211 exited but we still haven't been able to report it to GDB, we'll
1212 hold on to the last lwp of the dead process. */
1219 /* Return 1 if this lwp has an interesting status pending. */
1221 status_pending_p_callback (struct inferior_list_entry
*entry
, void *arg
)
1223 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
1224 ptid_t ptid
= * (ptid_t
*) arg
;
1225 struct thread_info
*thread
;
1227 /* Check if we're only interested in events from a specific process
1229 if (!ptid_equal (minus_one_ptid
, ptid
)
1230 && ptid_get_pid (ptid
) != ptid_get_pid (lwp
->head
.id
))
1233 thread
= get_lwp_thread (lwp
);
1235 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1236 report any status pending the LWP may have. */
1237 if (thread
->last_resume_kind
== resume_stop
1238 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
1241 return lwp
->status_pending_p
;
1245 same_lwp (struct inferior_list_entry
*entry
, void *data
)
1247 ptid_t ptid
= *(ptid_t
*) data
;
1250 if (ptid_get_lwp (ptid
) != 0)
1251 lwp
= ptid_get_lwp (ptid
);
1253 lwp
= ptid_get_pid (ptid
);
1255 if (ptid_get_lwp (entry
->id
) == lwp
)
1262 find_lwp_pid (ptid_t ptid
)
1264 return (struct lwp_info
*) find_inferior (&all_lwps
, same_lwp
, &ptid
);
1267 static struct lwp_info
*
1268 linux_wait_for_lwp (ptid_t ptid
, int *wstatp
, int options
)
1271 int to_wait_for
= -1;
1272 struct lwp_info
*child
= NULL
;
1275 fprintf (stderr
, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid
));
1277 if (ptid_equal (ptid
, minus_one_ptid
))
1278 to_wait_for
= -1; /* any child */
1280 to_wait_for
= ptid_get_lwp (ptid
); /* this lwp only */
1286 ret
= my_waitpid (to_wait_for
, wstatp
, options
);
1287 if (ret
== 0 || (ret
== -1 && errno
== ECHILD
&& (options
& WNOHANG
)))
1290 perror_with_name ("waitpid");
1293 && (!WIFSTOPPED (*wstatp
)
1294 || (WSTOPSIG (*wstatp
) != 32
1295 && WSTOPSIG (*wstatp
) != 33)))
1296 fprintf (stderr
, "Got an event from %d (%x)\n", ret
, *wstatp
);
1298 child
= find_lwp_pid (pid_to_ptid (ret
));
1300 /* If we didn't find a process, one of two things presumably happened:
1301 - A process we started and then detached from has exited. Ignore it.
1302 - A process we are controlling has forked and the new child's stop
1303 was reported to us by the kernel. Save its PID. */
1304 if (child
== NULL
&& WIFSTOPPED (*wstatp
))
1306 add_to_pid_list (&stopped_pids
, ret
, *wstatp
);
1309 else if (child
== NULL
)
1314 child
->last_status
= *wstatp
;
1316 if (WIFSTOPPED (*wstatp
))
1318 struct process_info
*proc
;
1320 /* Architecture-specific setup after inferior is running. This
1321 needs to happen after we have attached to the inferior and it
1322 is stopped for the first time, but before we access any
1323 inferior registers. */
1324 proc
= find_process_pid (pid_of (child
));
1325 if (proc
->private->new_inferior
)
1327 struct thread_info
*saved_inferior
;
1329 saved_inferior
= current_inferior
;
1330 current_inferior
= get_lwp_thread (child
);
1332 the_low_target
.arch_setup ();
1334 current_inferior
= saved_inferior
;
1336 proc
->private->new_inferior
= 0;
1340 /* Fetch the possibly triggered data watchpoint info and store it in
1343 On some archs, like x86, that use debug registers to set
1344 watchpoints, it's possible that the way to know which watched
1345 address trapped, is to check the register that is used to select
1346 which address to watch. Problem is, between setting the
1347 watchpoint and reading back which data address trapped, the user
1348 may change the set of watchpoints, and, as a consequence, GDB
1349 changes the debug registers in the inferior. To avoid reading
1350 back a stale stopped-data-address when that happens, we cache in
1351 LP the fact that a watchpoint trapped, and the corresponding data
1352 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1353 changes the debug registers meanwhile, we have the cached data we
1356 if (WIFSTOPPED (*wstatp
) && WSTOPSIG (*wstatp
) == SIGTRAP
)
1358 if (the_low_target
.stopped_by_watchpoint
== NULL
)
1360 child
->stopped_by_watchpoint
= 0;
1364 struct thread_info
*saved_inferior
;
1366 saved_inferior
= current_inferior
;
1367 current_inferior
= get_lwp_thread (child
);
1369 child
->stopped_by_watchpoint
1370 = the_low_target
.stopped_by_watchpoint ();
1372 if (child
->stopped_by_watchpoint
)
1374 if (the_low_target
.stopped_data_address
!= NULL
)
1375 child
->stopped_data_address
1376 = the_low_target
.stopped_data_address ();
1378 child
->stopped_data_address
= 0;
1381 current_inferior
= saved_inferior
;
1385 /* Store the STOP_PC, with adjustment applied. This depends on the
1386 architecture being defined already (so that CHILD has a valid
1387 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1389 if (WIFSTOPPED (*wstatp
))
1390 child
->stop_pc
= get_stop_pc (child
);
1393 && WIFSTOPPED (*wstatp
)
1394 && the_low_target
.get_pc
!= NULL
)
1396 struct thread_info
*saved_inferior
= current_inferior
;
1397 struct regcache
*regcache
;
1400 current_inferior
= get_lwp_thread (child
);
1401 regcache
= get_thread_regcache (current_inferior
, 1);
1402 pc
= (*the_low_target
.get_pc
) (regcache
);
1403 fprintf (stderr
, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc
);
1404 current_inferior
= saved_inferior
;
1410 /* This function should only be called if the LWP got a SIGTRAP.
1412 Handle any tracepoint steps or hits. Return true if a tracepoint
1413 event was handled, 0 otherwise. */
1416 handle_tracepoints (struct lwp_info
*lwp
)
1418 struct thread_info
*tinfo
= get_lwp_thread (lwp
);
1419 int tpoint_related_event
= 0;
1421 /* If this tracepoint hit causes a tracing stop, we'll immediately
1422 uninsert tracepoints. To do this, we temporarily pause all
1423 threads, unpatch away, and then unpause threads. We need to make
1424 sure the unpausing doesn't resume LWP too. */
1427 /* And we need to be sure that any all-threads-stopping doesn't try
1428 to move threads out of the jump pads, as it could deadlock the
1429 inferior (LWP could be in the jump pad, maybe even holding the
1432 /* Do any necessary step collect actions. */
1433 tpoint_related_event
|= tracepoint_finished_step (tinfo
, lwp
->stop_pc
);
1435 tpoint_related_event
|= handle_tracepoint_bkpts (tinfo
, lwp
->stop_pc
);
1437 /* See if we just hit a tracepoint and do its main collect
1439 tpoint_related_event
|= tracepoint_was_hit (tinfo
, lwp
->stop_pc
);
1443 gdb_assert (lwp
->suspended
== 0);
1444 gdb_assert (!stabilizing_threads
|| lwp
->collecting_fast_tracepoint
);
1446 if (tpoint_related_event
)
1449 fprintf (stderr
, "got a tracepoint event\n");
1456 /* Convenience wrapper. Returns true if LWP is presently collecting a
1460 linux_fast_tracepoint_collecting (struct lwp_info
*lwp
,
1461 struct fast_tpoint_collect_status
*status
)
1463 CORE_ADDR thread_area
;
1465 if (the_low_target
.get_thread_area
== NULL
)
1468 /* Get the thread area address. This is used to recognize which
1469 thread is which when tracing with the in-process agent library.
1470 We don't read anything from the address, and treat it as opaque;
1471 it's the address itself that we assume is unique per-thread. */
1472 if ((*the_low_target
.get_thread_area
) (lwpid_of (lwp
), &thread_area
) == -1)
1475 return fast_tracepoint_collecting (thread_area
, lwp
->stop_pc
, status
);
1478 /* The reason we resume in the caller, is because we want to be able
1479 to pass lwp->status_pending as WSTAT, and we need to clear
1480 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1481 refuses to resume. */
1484 maybe_move_out_of_jump_pad (struct lwp_info
*lwp
, int *wstat
)
1486 struct thread_info
*saved_inferior
;
1488 saved_inferior
= current_inferior
;
1489 current_inferior
= get_lwp_thread (lwp
);
1492 || (WIFSTOPPED (*wstat
) && WSTOPSIG (*wstat
) != SIGTRAP
))
1493 && supports_fast_tracepoints ()
1494 && agent_loaded_p ())
1496 struct fast_tpoint_collect_status status
;
1501 Checking whether LWP %ld needs to move out of the jump pad.\n",
1504 r
= linux_fast_tracepoint_collecting (lwp
, &status
);
1507 || (WSTOPSIG (*wstat
) != SIGILL
1508 && WSTOPSIG (*wstat
) != SIGFPE
1509 && WSTOPSIG (*wstat
) != SIGSEGV
1510 && WSTOPSIG (*wstat
) != SIGBUS
))
1512 lwp
->collecting_fast_tracepoint
= r
;
1516 if (r
== 1 && lwp
->exit_jump_pad_bkpt
== NULL
)
1518 /* Haven't executed the original instruction yet.
1519 Set breakpoint there, and wait till it's hit,
1520 then single-step until exiting the jump pad. */
1521 lwp
->exit_jump_pad_bkpt
1522 = set_breakpoint_at (status
.adjusted_insn_addr
, NULL
);
1527 Checking whether LWP %ld needs to move out of the jump pad...it does\n",
1529 current_inferior
= saved_inferior
;
1536 /* If we get a synchronous signal while collecting, *and*
1537 while executing the (relocated) original instruction,
1538 reset the PC to point at the tpoint address, before
1539 reporting to GDB. Otherwise, it's an IPA lib bug: just
1540 report the signal to GDB, and pray for the best. */
1542 lwp
->collecting_fast_tracepoint
= 0;
1545 && (status
.adjusted_insn_addr
<= lwp
->stop_pc
1546 && lwp
->stop_pc
< status
.adjusted_insn_addr_end
))
1549 struct regcache
*regcache
;
1551 /* The si_addr on a few signals references the address
1552 of the faulting instruction. Adjust that as
1554 if ((WSTOPSIG (*wstat
) == SIGILL
1555 || WSTOPSIG (*wstat
) == SIGFPE
1556 || WSTOPSIG (*wstat
) == SIGBUS
1557 || WSTOPSIG (*wstat
) == SIGSEGV
)
1558 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (lwp
),
1559 (PTRACE_TYPE_ARG3
) 0, &info
) == 0
1560 /* Final check just to make sure we don't clobber
1561 the siginfo of non-kernel-sent signals. */
1562 && (uintptr_t) info
.si_addr
== lwp
->stop_pc
)
1564 info
.si_addr
= (void *) (uintptr_t) status
.tpoint_addr
;
1565 ptrace (PTRACE_SETSIGINFO
, lwpid_of (lwp
),
1566 (PTRACE_TYPE_ARG3
) 0, &info
);
1569 regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
1570 (*the_low_target
.set_pc
) (regcache
, status
.tpoint_addr
);
1571 lwp
->stop_pc
= status
.tpoint_addr
;
1573 /* Cancel any fast tracepoint lock this thread was
1575 force_unlock_trace_buffer ();
1578 if (lwp
->exit_jump_pad_bkpt
!= NULL
)
1582 "Cancelling fast exit-jump-pad: removing bkpt. "
1583 "stopping all threads momentarily.\n");
1585 stop_all_lwps (1, lwp
);
1586 cancel_breakpoints ();
1588 delete_breakpoint (lwp
->exit_jump_pad_bkpt
);
1589 lwp
->exit_jump_pad_bkpt
= NULL
;
1591 unstop_all_lwps (1, lwp
);
1593 gdb_assert (lwp
->suspended
>= 0);
1600 Checking whether LWP %ld needs to move out of the jump pad...no\n",
1603 current_inferior
= saved_inferior
;
1607 /* Enqueue one signal in the "signals to report later when out of the
1611 enqueue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
1613 struct pending_signals
*p_sig
;
1617 Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat
), lwpid_of (lwp
));
1621 struct pending_signals
*sig
;
1623 for (sig
= lwp
->pending_signals_to_report
;
1627 " Already queued %d\n",
1630 fprintf (stderr
, " (no more currently queued signals)\n");
1633 /* Don't enqueue non-RT signals if they are already in the deferred
1634 queue. (SIGSTOP being the easiest signal to see ending up here
1636 if (WSTOPSIG (*wstat
) < __SIGRTMIN
)
1638 struct pending_signals
*sig
;
1640 for (sig
= lwp
->pending_signals_to_report
;
1644 if (sig
->signal
== WSTOPSIG (*wstat
))
1648 "Not requeuing already queued non-RT signal %d"
1657 p_sig
= xmalloc (sizeof (*p_sig
));
1658 p_sig
->prev
= lwp
->pending_signals_to_report
;
1659 p_sig
->signal
= WSTOPSIG (*wstat
);
1660 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
1661 ptrace (PTRACE_GETSIGINFO
, lwpid_of (lwp
), (PTRACE_TYPE_ARG3
) 0,
1664 lwp
->pending_signals_to_report
= p_sig
;
1667 /* Dequeue one signal from the "signals to report later when out of
1668 the jump pad" list. */
1671 dequeue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
1673 if (lwp
->pending_signals_to_report
!= NULL
)
1675 struct pending_signals
**p_sig
;
1677 p_sig
= &lwp
->pending_signals_to_report
;
1678 while ((*p_sig
)->prev
!= NULL
)
1679 p_sig
= &(*p_sig
)->prev
;
1681 *wstat
= W_STOPCODE ((*p_sig
)->signal
);
1682 if ((*p_sig
)->info
.si_signo
!= 0)
1683 ptrace (PTRACE_SETSIGINFO
, lwpid_of (lwp
), (PTRACE_TYPE_ARG3
) 0,
1689 fprintf (stderr
, "Reporting deferred signal %d for LWP %ld.\n",
1690 WSTOPSIG (*wstat
), lwpid_of (lwp
));
1694 struct pending_signals
*sig
;
1696 for (sig
= lwp
->pending_signals_to_report
;
1700 " Still queued %d\n",
1703 fprintf (stderr
, " (no more queued signals)\n");
1712 /* Arrange for a breakpoint to be hit again later. We don't keep the
1713 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1714 will handle the current event, eventually we will resume this LWP,
1715 and this breakpoint will trap again. */
1718 cancel_breakpoint (struct lwp_info
*lwp
)
1720 struct thread_info
*saved_inferior
;
1722 /* There's nothing to do if we don't support breakpoints. */
1723 if (!supports_breakpoints ())
1726 /* breakpoint_at reads from current inferior. */
1727 saved_inferior
= current_inferior
;
1728 current_inferior
= get_lwp_thread (lwp
);
1730 if ((*the_low_target
.breakpoint_at
) (lwp
->stop_pc
))
1734 "CB: Push back breakpoint for %s\n",
1735 target_pid_to_str (ptid_of (lwp
)));
1737 /* Back up the PC if necessary. */
1738 if (the_low_target
.decr_pc_after_break
)
1740 struct regcache
*regcache
1741 = get_thread_regcache (current_inferior
, 1);
1742 (*the_low_target
.set_pc
) (regcache
, lwp
->stop_pc
);
1745 current_inferior
= saved_inferior
;
1752 "CB: No breakpoint found at %s for [%s]\n",
1753 paddress (lwp
->stop_pc
),
1754 target_pid_to_str (ptid_of (lwp
)));
1757 current_inferior
= saved_inferior
;
1761 /* When the event-loop is doing a step-over, this points at the thread
1763 ptid_t step_over_bkpt
;
1765 /* Wait for an event from child PID. If PID is -1, wait for any
1766 child. Store the stop status through the status pointer WSTAT.
1767 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1768 event was found and OPTIONS contains WNOHANG. Return the PID of
1769 the stopped child otherwise. */
1772 linux_wait_for_event (ptid_t ptid
, int *wstat
, int options
)
1774 struct lwp_info
*event_child
, *requested_child
;
1778 requested_child
= NULL
;
1780 /* Check for a lwp with a pending status. */
1782 if (ptid_equal (ptid
, minus_one_ptid
) || ptid_is_pid (ptid
))
1784 event_child
= (struct lwp_info
*)
1785 find_inferior (&all_lwps
, status_pending_p_callback
, &ptid
);
1786 if (debug_threads
&& event_child
)
1787 fprintf (stderr
, "Got a pending child %ld\n", lwpid_of (event_child
));
1791 requested_child
= find_lwp_pid (ptid
);
1793 if (stopping_threads
== NOT_STOPPING_THREADS
1794 && requested_child
->status_pending_p
1795 && requested_child
->collecting_fast_tracepoint
)
1797 enqueue_one_deferred_signal (requested_child
,
1798 &requested_child
->status_pending
);
1799 requested_child
->status_pending_p
= 0;
1800 requested_child
->status_pending
= 0;
1801 linux_resume_one_lwp (requested_child
, 0, 0, NULL
);
1804 if (requested_child
->suspended
1805 && requested_child
->status_pending_p
)
1806 fatal ("requesting an event out of a suspended child?");
1808 if (requested_child
->status_pending_p
)
1809 event_child
= requested_child
;
1812 if (event_child
!= NULL
)
1815 fprintf (stderr
, "Got an event from pending child %ld (%04x)\n",
1816 lwpid_of (event_child
), event_child
->status_pending
);
1817 *wstat
= event_child
->status_pending
;
1818 event_child
->status_pending_p
= 0;
1819 event_child
->status_pending
= 0;
1820 current_inferior
= get_lwp_thread (event_child
);
1821 return lwpid_of (event_child
);
1824 if (ptid_is_pid (ptid
))
1826 /* A request to wait for a specific tgid. This is not possible
1827 with waitpid, so instead, we wait for any child, and leave
1828 children we're not interested in right now with a pending
1829 status to report later. */
1830 wait_ptid
= minus_one_ptid
;
1835 /* We only enter this loop if no process has a pending wait status. Thus
1836 any action taken in response to a wait status inside this loop is
1837 responding as soon as we detect the status, not after any pending
1841 event_child
= linux_wait_for_lwp (wait_ptid
, wstat
, options
);
1843 if ((options
& WNOHANG
) && event_child
== NULL
)
1846 fprintf (stderr
, "WNOHANG set, no event found\n");
1850 if (event_child
== NULL
)
1851 error ("event from unknown child");
1853 if (ptid_is_pid (ptid
)
1854 && ptid_get_pid (ptid
) != ptid_get_pid (ptid_of (event_child
)))
1856 if (! WIFSTOPPED (*wstat
))
1857 mark_lwp_dead (event_child
, *wstat
);
1860 event_child
->status_pending_p
= 1;
1861 event_child
->status_pending
= *wstat
;
1866 current_inferior
= get_lwp_thread (event_child
);
1868 /* Check for thread exit. */
1869 if (! WIFSTOPPED (*wstat
))
1872 fprintf (stderr
, "LWP %ld exiting\n", lwpid_of (event_child
));
1874 /* If the last thread is exiting, just return. */
1875 if (last_thread_of_process_p (current_inferior
))
1878 fprintf (stderr
, "LWP %ld is last lwp of process\n",
1879 lwpid_of (event_child
));
1880 return lwpid_of (event_child
);
1885 current_inferior
= (struct thread_info
*) all_threads
.head
;
1887 fprintf (stderr
, "Current inferior is now %ld\n",
1888 lwpid_of (get_thread_lwp (current_inferior
)));
1892 current_inferior
= NULL
;
1894 fprintf (stderr
, "Current inferior is now <NULL>\n");
1897 /* If we were waiting for this particular child to do something...
1898 well, it did something. */
1899 if (requested_child
!= NULL
)
1901 int lwpid
= lwpid_of (event_child
);
1903 /* Cancel the step-over operation --- the thread that
1904 started it is gone. */
1905 if (finish_step_over (event_child
))
1906 unstop_all_lwps (1, event_child
);
1907 delete_lwp (event_child
);
1911 delete_lwp (event_child
);
1913 /* Wait for a more interesting event. */
1917 if (event_child
->must_set_ptrace_flags
)
1919 linux_enable_event_reporting (lwpid_of (event_child
));
1920 event_child
->must_set_ptrace_flags
= 0;
1923 if (WIFSTOPPED (*wstat
) && WSTOPSIG (*wstat
) == SIGTRAP
1924 && *wstat
>> 16 != 0)
1926 handle_extended_wait (event_child
, *wstat
);
1930 if (WIFSTOPPED (*wstat
)
1931 && WSTOPSIG (*wstat
) == SIGSTOP
1932 && event_child
->stop_expected
)
1937 fprintf (stderr
, "Expected stop.\n");
1938 event_child
->stop_expected
= 0;
1940 should_stop
= (current_inferior
->last_resume_kind
== resume_stop
1941 || stopping_threads
!= NOT_STOPPING_THREADS
);
1945 linux_resume_one_lwp (event_child
,
1946 event_child
->stepping
, 0, NULL
);
1951 return lwpid_of (event_child
);
1958 /* Count the LWP's that have had events. */
1961 count_events_callback (struct inferior_list_entry
*entry
, void *data
)
1963 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
1964 struct thread_info
*thread
= get_lwp_thread (lp
);
1967 gdb_assert (count
!= NULL
);
1969 /* Count only resumed LWPs that have a SIGTRAP event pending that
1970 should be reported to GDB. */
1971 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
1972 && thread
->last_resume_kind
!= resume_stop
1973 && lp
->status_pending_p
1974 && WIFSTOPPED (lp
->status_pending
)
1975 && WSTOPSIG (lp
->status_pending
) == SIGTRAP
1976 && !breakpoint_inserted_here (lp
->stop_pc
))
1982 /* Select the LWP (if any) that is currently being single-stepped. */
1985 select_singlestep_lwp_callback (struct inferior_list_entry
*entry
, void *data
)
1987 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
1988 struct thread_info
*thread
= get_lwp_thread (lp
);
1990 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
1991 && thread
->last_resume_kind
== resume_step
1992 && lp
->status_pending_p
)
1998 /* Select the Nth LWP that has had a SIGTRAP event that should be
2002 select_event_lwp_callback (struct inferior_list_entry
*entry
, void *data
)
2004 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
2005 struct thread_info
*thread
= get_lwp_thread (lp
);
2006 int *selector
= data
;
2008 gdb_assert (selector
!= NULL
);
2010 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2011 if (thread
->last_resume_kind
!= resume_stop
2012 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2013 && lp
->status_pending_p
2014 && WIFSTOPPED (lp
->status_pending
)
2015 && WSTOPSIG (lp
->status_pending
) == SIGTRAP
2016 && !breakpoint_inserted_here (lp
->stop_pc
))
2017 if ((*selector
)-- == 0)
2024 cancel_breakpoints_callback (struct inferior_list_entry
*entry
, void *data
)
2026 struct lwp_info
*lp
= (struct lwp_info
*) entry
;
2027 struct thread_info
*thread
= get_lwp_thread (lp
);
2028 struct lwp_info
*event_lp
= data
;
2030 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2034 /* If a LWP other than the LWP that we're reporting an event for has
2035 hit a GDB breakpoint (as opposed to some random trap signal),
2036 then just arrange for it to hit it again later. We don't keep
2037 the SIGTRAP status and don't forward the SIGTRAP signal to the
2038 LWP. We will handle the current event, eventually we will resume
2039 all LWPs, and this one will get its breakpoint trap again.
2041 If we do not do this, then we run the risk that the user will
2042 delete or disable the breakpoint, but the LWP will have already
2045 if (thread
->last_resume_kind
!= resume_stop
2046 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2047 && lp
->status_pending_p
2048 && WIFSTOPPED (lp
->status_pending
)
2049 && WSTOPSIG (lp
->status_pending
) == SIGTRAP
2051 && !lp
->stopped_by_watchpoint
2052 && cancel_breakpoint (lp
))
2053 /* Throw away the SIGTRAP. */
2054 lp
->status_pending_p
= 0;
2060 linux_cancel_breakpoints (void)
2062 find_inferior (&all_lwps
, cancel_breakpoints_callback
, NULL
);
2065 /* Select one LWP out of those that have events pending. */
2068 select_event_lwp (struct lwp_info
**orig_lp
)
2071 int random_selector
;
2072 struct lwp_info
*event_lp
;
2074 /* Give preference to any LWP that is being single-stepped. */
2076 = (struct lwp_info
*) find_inferior (&all_lwps
,
2077 select_singlestep_lwp_callback
, NULL
);
2078 if (event_lp
!= NULL
)
2082 "SEL: Select single-step %s\n",
2083 target_pid_to_str (ptid_of (event_lp
)));
2087 /* No single-stepping LWP. Select one at random, out of those
2088 which have had SIGTRAP events. */
2090 /* First see how many SIGTRAP events we have. */
2091 find_inferior (&all_lwps
, count_events_callback
, &num_events
);
2093 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2094 random_selector
= (int)
2095 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
2097 if (debug_threads
&& num_events
> 1)
2099 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2100 num_events
, random_selector
);
2102 event_lp
= (struct lwp_info
*) find_inferior (&all_lwps
,
2103 select_event_lwp_callback
,
2107 if (event_lp
!= NULL
)
2109 /* Switch the event LWP. */
2110 *orig_lp
= event_lp
;
2114 /* Decrement the suspend count of an LWP. */
2117 unsuspend_one_lwp (struct inferior_list_entry
*entry
, void *except
)
2119 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2121 /* Ignore EXCEPT. */
2127 gdb_assert (lwp
->suspended
>= 0);
2131 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2135 unsuspend_all_lwps (struct lwp_info
*except
)
2137 find_inferior (&all_lwps
, unsuspend_one_lwp
, except
);
2140 static void move_out_of_jump_pad_callback (struct inferior_list_entry
*entry
);
2141 static int stuck_in_jump_pad_callback (struct inferior_list_entry
*entry
,
2143 static int lwp_running (struct inferior_list_entry
*entry
, void *data
);
2144 static ptid_t
linux_wait_1 (ptid_t ptid
,
2145 struct target_waitstatus
*ourstatus
,
2146 int target_options
);
2148 /* Stabilize threads (move out of jump pads).
2150 If a thread is midway collecting a fast tracepoint, we need to
2151 finish the collection and move it out of the jump pad before
2152 reporting the signal.
2154 This avoids recursion while collecting (when a signal arrives
2155 midway, and the signal handler itself collects), which would trash
2156 the trace buffer. In case the user set a breakpoint in a signal
2157 handler, this avoids the backtrace showing the jump pad, etc..
2158 Most importantly, there are certain things we can't do safely if
2159 threads are stopped in a jump pad (or in its callee's). For
2162 - starting a new trace run. A thread still collecting the
2163 previous run, could trash the trace buffer when resumed. The trace
2164 buffer control structures would have been reset but the thread had
2165 no way to tell. The thread could even midway memcpy'ing to the
2166 buffer, which would mean that when resumed, it would clobber the
2167 trace buffer that had been set for a new run.
2169 - we can't rewrite/reuse the jump pads for new tracepoints
2170 safely. Say you do tstart while a thread is stopped midway while
2171 collecting. When the thread is later resumed, it finishes the
2172 collection, and returns to the jump pad, to execute the original
2173 instruction that was under the tracepoint jump at the time the
2174 older run had been started. If the jump pad had been rewritten
2175 since for something else in the new run, the thread would now
2176 execute the wrong / random instructions. */
2179 linux_stabilize_threads (void)
2181 struct thread_info
*save_inferior
;
2182 struct lwp_info
*lwp_stuck
;
2185 = (struct lwp_info
*) find_inferior (&all_lwps
,
2186 stuck_in_jump_pad_callback
, NULL
);
2187 if (lwp_stuck
!= NULL
)
2190 fprintf (stderr
, "can't stabilize, LWP %ld is stuck in jump pad\n",
2191 lwpid_of (lwp_stuck
));
2195 save_inferior
= current_inferior
;
2197 stabilizing_threads
= 1;
2200 for_each_inferior (&all_lwps
, move_out_of_jump_pad_callback
);
2202 /* Loop until all are stopped out of the jump pads. */
2203 while (find_inferior (&all_lwps
, lwp_running
, NULL
) != NULL
)
2205 struct target_waitstatus ourstatus
;
2206 struct lwp_info
*lwp
;
2209 /* Note that we go through the full wait even loop. While
2210 moving threads out of jump pad, we need to be able to step
2211 over internal breakpoints and such. */
2212 linux_wait_1 (minus_one_ptid
, &ourstatus
, 0);
2214 if (ourstatus
.kind
== TARGET_WAITKIND_STOPPED
)
2216 lwp
= get_thread_lwp (current_inferior
);
2221 if (ourstatus
.value
.sig
!= GDB_SIGNAL_0
2222 || current_inferior
->last_resume_kind
== resume_stop
)
2224 wstat
= W_STOPCODE (gdb_signal_to_host (ourstatus
.value
.sig
));
2225 enqueue_one_deferred_signal (lwp
, &wstat
);
2230 find_inferior (&all_lwps
, unsuspend_one_lwp
, NULL
);
2232 stabilizing_threads
= 0;
2234 current_inferior
= save_inferior
;
2239 = (struct lwp_info
*) find_inferior (&all_lwps
,
2240 stuck_in_jump_pad_callback
, NULL
);
2241 if (lwp_stuck
!= NULL
)
2242 fprintf (stderr
, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2243 lwpid_of (lwp_stuck
));
2247 /* Wait for process, returns status. */
2250 linux_wait_1 (ptid_t ptid
,
2251 struct target_waitstatus
*ourstatus
, int target_options
)
2254 struct lwp_info
*event_child
;
2257 int step_over_finished
;
2258 int bp_explains_trap
;
2259 int maybe_internal_trap
;
2264 /* Translate generic target options into linux options. */
2266 if (target_options
& TARGET_WNOHANG
)
2270 bp_explains_trap
= 0;
2273 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2275 /* If we were only supposed to resume one thread, only wait for
2276 that thread - if it's still alive. If it died, however - which
2277 can happen if we're coming from the thread death case below -
2278 then we need to make sure we restart the other threads. We could
2279 pick a thread at random or restart all; restarting all is less
2282 && !ptid_equal (cont_thread
, null_ptid
)
2283 && !ptid_equal (cont_thread
, minus_one_ptid
))
2285 struct thread_info
*thread
;
2287 thread
= (struct thread_info
*) find_inferior_id (&all_threads
,
2290 /* No stepping, no signal - unless one is pending already, of course. */
2293 struct thread_resume resume_info
;
2294 resume_info
.thread
= minus_one_ptid
;
2295 resume_info
.kind
= resume_continue
;
2296 resume_info
.sig
= 0;
2297 linux_resume (&resume_info
, 1);
2303 if (ptid_equal (step_over_bkpt
, null_ptid
))
2304 pid
= linux_wait_for_event (ptid
, &w
, options
);
2308 fprintf (stderr
, "step_over_bkpt set [%s], doing a blocking wait\n",
2309 target_pid_to_str (step_over_bkpt
));
2310 pid
= linux_wait_for_event (step_over_bkpt
, &w
, options
& ~WNOHANG
);
2313 if (pid
== 0) /* only if TARGET_WNOHANG */
2316 event_child
= get_thread_lwp (current_inferior
);
2318 /* If we are waiting for a particular child, and it exited,
2319 linux_wait_for_event will return its exit status. Similarly if
2320 the last child exited. If this is not the last child, however,
2321 do not report it as exited until there is a 'thread exited' response
2322 available in the remote protocol. Instead, just wait for another event.
2323 This should be safe, because if the thread crashed we will already
2324 have reported the termination signal to GDB; that should stop any
2325 in-progress stepping operations, etc.
2327 Report the exit status of the last thread to exit. This matches
2328 LinuxThreads' behavior. */
2330 if (last_thread_of_process_p (current_inferior
))
2332 if (WIFEXITED (w
) || WIFSIGNALED (w
))
2336 ourstatus
->kind
= TARGET_WAITKIND_EXITED
;
2337 ourstatus
->value
.integer
= WEXITSTATUS (w
);
2341 "\nChild exited with retcode = %x \n",
2346 ourstatus
->kind
= TARGET_WAITKIND_SIGNALLED
;
2347 ourstatus
->value
.sig
= gdb_signal_from_host (WTERMSIG (w
));
2351 "\nChild terminated with signal = %x \n",
2356 return ptid_of (event_child
);
2361 if (!WIFSTOPPED (w
))
2365 /* If this event was not handled before, and is not a SIGTRAP, we
2366 report it. SIGILL and SIGSEGV are also treated as traps in case
2367 a breakpoint is inserted at the current PC. If this target does
2368 not support internal breakpoints at all, we also report the
2369 SIGTRAP without further processing; it's of no concern to us. */
2371 = (supports_breakpoints ()
2372 && (WSTOPSIG (w
) == SIGTRAP
2373 || ((WSTOPSIG (w
) == SIGILL
2374 || WSTOPSIG (w
) == SIGSEGV
)
2375 && (*the_low_target
.breakpoint_at
) (event_child
->stop_pc
))));
2377 if (maybe_internal_trap
)
2379 /* Handle anything that requires bookkeeping before deciding to
2380 report the event or continue waiting. */
2382 /* First check if we can explain the SIGTRAP with an internal
2383 breakpoint, or if we should possibly report the event to GDB.
2384 Do this before anything that may remove or insert a
2386 bp_explains_trap
= breakpoint_inserted_here (event_child
->stop_pc
);
2388 /* We have a SIGTRAP, possibly a step-over dance has just
2389 finished. If so, tweak the state machine accordingly,
2390 reinsert breakpoints and delete any reinsert (software
2391 single-step) breakpoints. */
2392 step_over_finished
= finish_step_over (event_child
);
2394 /* Now invoke the callbacks of any internal breakpoints there. */
2395 check_breakpoints (event_child
->stop_pc
);
2397 /* Handle tracepoint data collecting. This may overflow the
2398 trace buffer, and cause a tracing stop, removing
2400 trace_event
= handle_tracepoints (event_child
);
2402 if (bp_explains_trap
)
2404 /* If we stepped or ran into an internal breakpoint, we've
2405 already handled it. So next time we resume (from this
2406 PC), we should step over it. */
2408 fprintf (stderr
, "Hit a gdbserver breakpoint.\n");
2410 if (breakpoint_here (event_child
->stop_pc
))
2411 event_child
->need_step_over
= 1;
2416 /* We have some other signal, possibly a step-over dance was in
2417 progress, and it should be cancelled too. */
2418 step_over_finished
= finish_step_over (event_child
);
2421 /* We have all the data we need. Either report the event to GDB, or
2422 resume threads and keep waiting for more. */
2424 /* If we're collecting a fast tracepoint, finish the collection and
2425 move out of the jump pad before delivering a signal. See
2426 linux_stabilize_threads. */
2429 && WSTOPSIG (w
) != SIGTRAP
2430 && supports_fast_tracepoints ()
2431 && agent_loaded_p ())
2435 "Got signal %d for LWP %ld. Check if we need "
2436 "to defer or adjust it.\n",
2437 WSTOPSIG (w
), lwpid_of (event_child
));
2439 /* Allow debugging the jump pad itself. */
2440 if (current_inferior
->last_resume_kind
!= resume_step
2441 && maybe_move_out_of_jump_pad (event_child
, &w
))
2443 enqueue_one_deferred_signal (event_child
, &w
);
2447 "Signal %d for LWP %ld deferred (in jump pad)\n",
2448 WSTOPSIG (w
), lwpid_of (event_child
));
2450 linux_resume_one_lwp (event_child
, 0, 0, NULL
);
2455 if (event_child
->collecting_fast_tracepoint
)
2459 LWP %ld was trying to move out of the jump pad (%d). \
2460 Check if we're already there.\n",
2461 lwpid_of (event_child
),
2462 event_child
->collecting_fast_tracepoint
);
2466 event_child
->collecting_fast_tracepoint
2467 = linux_fast_tracepoint_collecting (event_child
, NULL
);
2469 if (event_child
->collecting_fast_tracepoint
!= 1)
2471 /* No longer need this breakpoint. */
2472 if (event_child
->exit_jump_pad_bkpt
!= NULL
)
2476 "No longer need exit-jump-pad bkpt; removing it."
2477 "stopping all threads momentarily.\n");
2479 /* Other running threads could hit this breakpoint.
2480 We don't handle moribund locations like GDB does,
2481 instead we always pause all threads when removing
2482 breakpoints, so that any step-over or
2483 decr_pc_after_break adjustment is always taken
2484 care of while the breakpoint is still
2486 stop_all_lwps (1, event_child
);
2487 cancel_breakpoints ();
2489 delete_breakpoint (event_child
->exit_jump_pad_bkpt
);
2490 event_child
->exit_jump_pad_bkpt
= NULL
;
2492 unstop_all_lwps (1, event_child
);
2494 gdb_assert (event_child
->suspended
>= 0);
2498 if (event_child
->collecting_fast_tracepoint
== 0)
2502 "fast tracepoint finished "
2503 "collecting successfully.\n");
2505 /* We may have a deferred signal to report. */
2506 if (dequeue_one_deferred_signal (event_child
, &w
))
2509 fprintf (stderr
, "dequeued one signal.\n");
2514 fprintf (stderr
, "no deferred signals.\n");
2516 if (stabilizing_threads
)
2518 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
2519 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
2520 return ptid_of (event_child
);
2526 /* Check whether GDB would be interested in this event. */
2528 /* If GDB is not interested in this signal, don't stop other
2529 threads, and don't report it to GDB. Just resume the inferior
2530 right away. We do this for threading-related signals as well as
2531 any that GDB specifically requested we ignore. But never ignore
2532 SIGSTOP if we sent it ourselves, and do not ignore signals when
2533 stepping - they may require special handling to skip the signal
2535 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2538 && current_inferior
->last_resume_kind
!= resume_step
2540 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2541 (current_process ()->private->thread_db
!= NULL
2542 && (WSTOPSIG (w
) == __SIGRTMIN
2543 || WSTOPSIG (w
) == __SIGRTMIN
+ 1))
2546 (pass_signals
[gdb_signal_from_host (WSTOPSIG (w
))]
2547 && !(WSTOPSIG (w
) == SIGSTOP
2548 && current_inferior
->last_resume_kind
== resume_stop
))))
2550 siginfo_t info
, *info_p
;
2553 fprintf (stderr
, "Ignored signal %d for LWP %ld.\n",
2554 WSTOPSIG (w
), lwpid_of (event_child
));
2556 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (event_child
),
2557 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
2561 linux_resume_one_lwp (event_child
, event_child
->stepping
,
2562 WSTOPSIG (w
), info_p
);
2566 /* Note that all addresses are always "out of the step range" when
2567 there's no range to begin with. */
2568 in_step_range
= lwp_in_step_range (event_child
);
2570 /* If GDB wanted this thread to single step, and the thread is out
2571 of the step range, we always want to report the SIGTRAP, and let
2572 GDB handle it. Watchpoints should always be reported. So should
2573 signals we can't explain. A SIGTRAP we can't explain could be a
2574 GDB breakpoint --- we may or not support Z0 breakpoints. If we
2575 do, we're be able to handle GDB breakpoints on top of internal
2576 breakpoints, by handling the internal breakpoint and still
2577 reporting the event to GDB. If we don't, we're out of luck, GDB
2578 won't see the breakpoint hit. */
2579 report_to_gdb
= (!maybe_internal_trap
2580 || (current_inferior
->last_resume_kind
== resume_step
2582 || event_child
->stopped_by_watchpoint
2583 || (!step_over_finished
&& !in_step_range
2584 && !bp_explains_trap
&& !trace_event
)
2585 || (gdb_breakpoint_here (event_child
->stop_pc
)
2586 && gdb_condition_true_at_breakpoint (event_child
->stop_pc
)
2587 && gdb_no_commands_at_breakpoint (event_child
->stop_pc
)));
2589 run_breakpoint_commands (event_child
->stop_pc
);
2591 /* We found no reason GDB would want us to stop. We either hit one
2592 of our own breakpoints, or finished an internal step GDB
2593 shouldn't know about. */
2598 if (bp_explains_trap
)
2599 fprintf (stderr
, "Hit a gdbserver breakpoint.\n");
2600 if (step_over_finished
)
2601 fprintf (stderr
, "Step-over finished.\n");
2603 fprintf (stderr
, "Tracepoint event.\n");
2604 if (lwp_in_step_range (event_child
))
2605 fprintf (stderr
, "Range stepping pc 0x%s [0x%s, 0x%s).\n",
2606 paddress (event_child
->stop_pc
),
2607 paddress (event_child
->step_range_start
),
2608 paddress (event_child
->step_range_end
));
2611 /* We're not reporting this breakpoint to GDB, so apply the
2612 decr_pc_after_break adjustment to the inferior's regcache
2615 if (the_low_target
.set_pc
!= NULL
)
2617 struct regcache
*regcache
2618 = get_thread_regcache (get_lwp_thread (event_child
), 1);
2619 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
);
2622 /* We may have finished stepping over a breakpoint. If so,
2623 we've stopped and suspended all LWPs momentarily except the
2624 stepping one. This is where we resume them all again. We're
2625 going to keep waiting, so use proceed, which handles stepping
2626 over the next breakpoint. */
2628 fprintf (stderr
, "proceeding all threads.\n");
2630 if (step_over_finished
)
2631 unsuspend_all_lwps (event_child
);
2633 proceed_all_lwps ();
2639 if (current_inferior
->last_resume_kind
== resume_step
)
2641 if (event_child
->step_range_start
== event_child
->step_range_end
)
2642 fprintf (stderr
, "GDB wanted to single-step, reporting event.\n");
2643 else if (!lwp_in_step_range (event_child
))
2644 fprintf (stderr
, "Out of step range, reporting event.\n");
2646 if (event_child
->stopped_by_watchpoint
)
2647 fprintf (stderr
, "Stopped by watchpoint.\n");
2648 if (gdb_breakpoint_here (event_child
->stop_pc
))
2649 fprintf (stderr
, "Stopped by GDB breakpoint.\n");
2651 fprintf (stderr
, "Hit a non-gdbserver trap event.\n");
2654 /* Alright, we're going to report a stop. */
2656 if (!non_stop
&& !stabilizing_threads
)
2658 /* In all-stop, stop all threads. */
2659 stop_all_lwps (0, NULL
);
2661 /* If we're not waiting for a specific LWP, choose an event LWP
2662 from among those that have had events. Giving equal priority
2663 to all LWPs that have had events helps prevent
2665 if (ptid_equal (ptid
, minus_one_ptid
))
2667 event_child
->status_pending_p
= 1;
2668 event_child
->status_pending
= w
;
2670 select_event_lwp (&event_child
);
2672 event_child
->status_pending_p
= 0;
2673 w
= event_child
->status_pending
;
2676 /* Now that we've selected our final event LWP, cancel any
2677 breakpoints in other LWPs that have hit a GDB breakpoint.
2678 See the comment in cancel_breakpoints_callback to find out
2680 find_inferior (&all_lwps
, cancel_breakpoints_callback
, event_child
);
2682 /* If we were going a step-over, all other threads but the stepping one
2683 had been paused in start_step_over, with their suspend counts
2684 incremented. We don't want to do a full unstop/unpause, because we're
2685 in all-stop mode (so we want threads stopped), but we still need to
2686 unsuspend the other threads, to decrement their `suspended' count
2688 if (step_over_finished
)
2689 unsuspend_all_lwps (event_child
);
2691 /* Stabilize threads (move out of jump pads). */
2692 stabilize_threads ();
2696 /* If we just finished a step-over, then all threads had been
2697 momentarily paused. In all-stop, that's fine, we want
2698 threads stopped by now anyway. In non-stop, we need to
2699 re-resume threads that GDB wanted to be running. */
2700 if (step_over_finished
)
2701 unstop_all_lwps (1, event_child
);
2704 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
2706 if (current_inferior
->last_resume_kind
== resume_stop
2707 && WSTOPSIG (w
) == SIGSTOP
)
2709 /* A thread that has been requested to stop by GDB with vCont;t,
2710 and it stopped cleanly, so report as SIG0. The use of
2711 SIGSTOP is an implementation detail. */
2712 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
2714 else if (current_inferior
->last_resume_kind
== resume_stop
2715 && WSTOPSIG (w
) != SIGSTOP
)
2717 /* A thread that has been requested to stop by GDB with vCont;t,
2718 but, it stopped for other reasons. */
2719 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
2723 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
2726 gdb_assert (ptid_equal (step_over_bkpt
, null_ptid
));
2729 fprintf (stderr
, "linux_wait ret = %s, %d, %d\n",
2730 target_pid_to_str (ptid_of (event_child
)),
2732 ourstatus
->value
.sig
);
2734 return ptid_of (event_child
);
2737 /* Get rid of any pending event in the pipe. */
2739 async_file_flush (void)
2745 ret
= read (linux_event_pipe
[0], &buf
, 1);
2746 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
2749 /* Put something in the pipe, so the event loop wakes up. */
2751 async_file_mark (void)
2755 async_file_flush ();
2758 ret
= write (linux_event_pipe
[1], "+", 1);
2759 while (ret
== 0 || (ret
== -1 && errno
== EINTR
));
2761 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2762 be awakened anyway. */
2766 linux_wait (ptid_t ptid
,
2767 struct target_waitstatus
*ourstatus
, int target_options
)
2772 fprintf (stderr
, "linux_wait: [%s]\n", target_pid_to_str (ptid
));
2774 /* Flush the async file first. */
2775 if (target_is_async_p ())
2776 async_file_flush ();
2778 event_ptid
= linux_wait_1 (ptid
, ourstatus
, target_options
);
2780 /* If at least one stop was reported, there may be more. A single
2781 SIGCHLD can signal more than one child stop. */
2782 if (target_is_async_p ()
2783 && (target_options
& TARGET_WNOHANG
) != 0
2784 && !ptid_equal (event_ptid
, null_ptid
))
/* Send a signal to an LWP.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef __NR_tkill
  {
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return ret;
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
/* Public entry point: request that LWP stop, by queueing a SIGSTOP
   for it.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
2825 send_sigstop (struct lwp_info
*lwp
)
2829 pid
= lwpid_of (lwp
);
2831 /* If we already have a pending stop signal for this process, don't
2833 if (lwp
->stop_expected
)
2836 fprintf (stderr
, "Have pending sigstop for lwp %d\n", pid
);
2842 fprintf (stderr
, "Sending sigstop to lwp %d\n", pid
);
2844 lwp
->stop_expected
= 1;
2845 kill_lwp (pid
, SIGSTOP
);
2849 send_sigstop_callback (struct inferior_list_entry
*entry
, void *except
)
2851 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2853 /* Ignore EXCEPT. */
2864 /* Increment the suspend count of an LWP, and stop it, if not stopped
2867 suspend_and_send_sigstop_callback (struct inferior_list_entry
*entry
,
2870 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2872 /* Ignore EXCEPT. */
2878 return send_sigstop_callback (entry
, except
);
2882 mark_lwp_dead (struct lwp_info
*lwp
, int wstat
)
2884 /* It's dead, really. */
2887 /* Store the exit status for later. */
2888 lwp
->status_pending_p
= 1;
2889 lwp
->status_pending
= wstat
;
2891 /* Prevent trying to stop it. */
2894 /* No further stops are expected from a dead lwp. */
2895 lwp
->stop_expected
= 0;
2899 wait_for_sigstop (struct inferior_list_entry
*entry
)
2901 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2902 struct thread_info
*saved_inferior
;
2911 fprintf (stderr
, "wait_for_sigstop: LWP %ld already stopped\n",
2916 saved_inferior
= current_inferior
;
2917 if (saved_inferior
!= NULL
)
2918 saved_tid
= ((struct inferior_list_entry
*) saved_inferior
)->id
;
2920 saved_tid
= null_ptid
; /* avoid bogus unused warning */
2922 ptid
= lwp
->head
.id
;
2925 fprintf (stderr
, "wait_for_sigstop: pulling one event\n");
2927 pid
= linux_wait_for_event (ptid
, &wstat
, __WALL
);
2929 /* If we stopped with a non-SIGSTOP signal, save it for later
2930 and record the pending SIGSTOP. If the process exited, just
2932 if (WIFSTOPPED (wstat
))
2935 fprintf (stderr
, "LWP %ld stopped with signal %d\n",
2936 lwpid_of (lwp
), WSTOPSIG (wstat
));
2938 if (WSTOPSIG (wstat
) != SIGSTOP
)
2941 fprintf (stderr
, "LWP %ld stopped with non-sigstop status %06x\n",
2942 lwpid_of (lwp
), wstat
);
2944 lwp
->status_pending_p
= 1;
2945 lwp
->status_pending
= wstat
;
2951 fprintf (stderr
, "Process %d exited while stopping LWPs\n", pid
);
2953 lwp
= find_lwp_pid (pid_to_ptid (pid
));
2956 /* Leave this status pending for the next time we're able to
2957 report it. In the mean time, we'll report this lwp as
2958 dead to GDB, so GDB doesn't try to read registers and
2959 memory from it. This can only happen if this was the
2960 last thread of the process; otherwise, PID is removed
2961 from the thread tables before linux_wait_for_event
2963 mark_lwp_dead (lwp
, wstat
);
2967 if (saved_inferior
== NULL
|| linux_thread_alive (saved_tid
))
2968 current_inferior
= saved_inferior
;
2972 fprintf (stderr
, "Previously current thread died.\n");
2976 /* We can't change the current inferior behind GDB's back,
2977 otherwise, a subsequent command may apply to the wrong
2979 current_inferior
= NULL
;
2983 /* Set a valid thread as current. */
2984 set_desired_inferior (0);
2989 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2990 move it out, because we need to report the stop event to GDB. For
2991 example, if the user puts a breakpoint in the jump pad, it's
2992 because she wants to debug it. */
2995 stuck_in_jump_pad_callback (struct inferior_list_entry
*entry
, void *data
)
2997 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
2998 struct thread_info
*thread
= get_lwp_thread (lwp
);
3000 gdb_assert (lwp
->suspended
== 0);
3001 gdb_assert (lwp
->stopped
);
3003 /* Allow debugging the jump pad, gdb_collect, etc.. */
3004 return (supports_fast_tracepoints ()
3005 && agent_loaded_p ()
3006 && (gdb_breakpoint_here (lwp
->stop_pc
)
3007 || lwp
->stopped_by_watchpoint
3008 || thread
->last_resume_kind
== resume_step
)
3009 && linux_fast_tracepoint_collecting (lwp
, NULL
));
3013 move_out_of_jump_pad_callback (struct inferior_list_entry
*entry
)
3015 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
3016 struct thread_info
*thread
= get_lwp_thread (lwp
);
3019 gdb_assert (lwp
->suspended
== 0);
3020 gdb_assert (lwp
->stopped
);
3022 wstat
= lwp
->status_pending_p
? &lwp
->status_pending
: NULL
;
3024 /* Allow debugging the jump pad, gdb_collect, etc. */
3025 if (!gdb_breakpoint_here (lwp
->stop_pc
)
3026 && !lwp
->stopped_by_watchpoint
3027 && thread
->last_resume_kind
!= resume_step
3028 && maybe_move_out_of_jump_pad (lwp
, wstat
))
3032 "LWP %ld needs stabilizing (in jump pad)\n",
3037 lwp
->status_pending_p
= 0;
3038 enqueue_one_deferred_signal (lwp
, wstat
);
3042 "Signal %d for LWP %ld deferred "
3044 WSTOPSIG (*wstat
), lwpid_of (lwp
));
3047 linux_resume_one_lwp (lwp
, 0, 0, NULL
);
3054 lwp_running (struct inferior_list_entry
*entry
, void *data
)
3056 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
3065 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3066 If SUSPEND, then also increase the suspend count of every LWP,
3070 stop_all_lwps (int suspend
, struct lwp_info
*except
)
3072 /* Should not be called recursively. */
3073 gdb_assert (stopping_threads
== NOT_STOPPING_THREADS
);
3075 stopping_threads
= (suspend
3076 ? STOPPING_AND_SUSPENDING_THREADS
3077 : STOPPING_THREADS
);
3080 find_inferior (&all_lwps
, suspend_and_send_sigstop_callback
, except
);
3082 find_inferior (&all_lwps
, send_sigstop_callback
, except
);
3083 for_each_inferior (&all_lwps
, wait_for_sigstop
);
3084 stopping_threads
= NOT_STOPPING_THREADS
;
3087 /* Resume execution of the inferior process.
3088 If STEP is nonzero, single-step it.
3089 If SIGNAL is nonzero, give it that signal. */
3092 linux_resume_one_lwp (struct lwp_info
*lwp
,
3093 int step
, int signal
, siginfo_t
*info
)
3095 struct thread_info
*saved_inferior
;
3096 int fast_tp_collecting
;
3098 if (lwp
->stopped
== 0)
3101 fast_tp_collecting
= lwp
->collecting_fast_tracepoint
;
3103 gdb_assert (!stabilizing_threads
|| fast_tp_collecting
);
3105 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3106 user used the "jump" command, or "set $pc = foo"). */
3107 if (lwp
->stop_pc
!= get_pc (lwp
))
3109 /* Collecting 'while-stepping' actions doesn't make sense
3111 release_while_stepping_state_list (get_lwp_thread (lwp
));
3114 /* If we have pending signals or status, and a new signal, enqueue the
3115 signal. Also enqueue the signal if we are waiting to reinsert a
3116 breakpoint; it will be picked up again below. */
3118 && (lwp
->status_pending_p
3119 || lwp
->pending_signals
!= NULL
3120 || lwp
->bp_reinsert
!= 0
3121 || fast_tp_collecting
))
3123 struct pending_signals
*p_sig
;
3124 p_sig
= xmalloc (sizeof (*p_sig
));
3125 p_sig
->prev
= lwp
->pending_signals
;
3126 p_sig
->signal
= signal
;
3128 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
3130 memcpy (&p_sig
->info
, info
, sizeof (siginfo_t
));
3131 lwp
->pending_signals
= p_sig
;
3134 if (lwp
->status_pending_p
)
3137 fprintf (stderr
, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3138 " has pending status\n",
3139 lwpid_of (lwp
), step
? "step" : "continue", signal
,
3140 lwp
->stop_expected
? "expected" : "not expected");
3144 saved_inferior
= current_inferior
;
3145 current_inferior
= get_lwp_thread (lwp
);
3148 fprintf (stderr
, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3149 lwpid_of (lwp
), step
? "step" : "continue", signal
,
3150 lwp
->stop_expected
? "expected" : "not expected");
3152 /* This bit needs some thinking about. If we get a signal that
3153 we must report while a single-step reinsert is still pending,
3154 we often end up resuming the thread. It might be better to
3155 (ew) allow a stack of pending events; then we could be sure that
3156 the reinsert happened right away and not lose any signals.
3158 Making this stack would also shrink the window in which breakpoints are
3159 uninserted (see comment in linux_wait_for_lwp) but not enough for
3160 complete correctness, so it won't solve that problem. It may be
3161 worthwhile just to solve this one, however. */
3162 if (lwp
->bp_reinsert
!= 0)
3165 fprintf (stderr
, " pending reinsert at 0x%s\n",
3166 paddress (lwp
->bp_reinsert
));
3168 if (can_hardware_single_step ())
3170 if (fast_tp_collecting
== 0)
3173 fprintf (stderr
, "BAD - reinserting but not stepping.\n");
3175 fprintf (stderr
, "BAD - reinserting and suspended(%d).\n",
3182 /* Postpone any pending signal. It was enqueued above. */
3186 if (fast_tp_collecting
== 1)
3190 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3193 /* Postpone any pending signal. It was enqueued above. */
3196 else if (fast_tp_collecting
== 2)
3200 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3203 if (can_hardware_single_step ())
3206 fatal ("moving out of jump pad single-stepping"
3207 " not implemented on this target");
3209 /* Postpone any pending signal. It was enqueued above. */
3213 /* If we have while-stepping actions in this thread set it stepping.
3214 If we have a signal to deliver, it may or may not be set to
3215 SIG_IGN, we don't know. Assume so, and allow collecting
3216 while-stepping into a signal handler. A possible smart thing to
3217 do would be to set an internal breakpoint at the signal return
3218 address, continue, and carry on catching this while-stepping
3219 action only when that breakpoint is hit. A future
3221 if (get_lwp_thread (lwp
)->while_stepping
!= NULL
3222 && can_hardware_single_step ())
3226 "lwp %ld has a while-stepping action -> forcing step.\n",
3231 if (debug_threads
&& the_low_target
.get_pc
!= NULL
)
3233 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 1);
3234 CORE_ADDR pc
= (*the_low_target
.get_pc
) (regcache
);
3235 fprintf (stderr
, " resuming from pc 0x%lx\n", (long) pc
);
3238 /* If we have pending signals, consume one unless we are trying to
3239 reinsert a breakpoint or we're trying to finish a fast tracepoint
3241 if (lwp
->pending_signals
!= NULL
3242 && lwp
->bp_reinsert
== 0
3243 && fast_tp_collecting
== 0)
3245 struct pending_signals
**p_sig
;
3247 p_sig
= &lwp
->pending_signals
;
3248 while ((*p_sig
)->prev
!= NULL
)
3249 p_sig
= &(*p_sig
)->prev
;
3251 signal
= (*p_sig
)->signal
;
3252 if ((*p_sig
)->info
.si_signo
!= 0)
3253 ptrace (PTRACE_SETSIGINFO
, lwpid_of (lwp
), (PTRACE_TYPE_ARG3
) 0,
3260 if (the_low_target
.prepare_to_resume
!= NULL
)
3261 the_low_target
.prepare_to_resume (lwp
);
3263 regcache_invalidate_thread (get_lwp_thread (lwp
));
3266 lwp
->stopped_by_watchpoint
= 0;
3267 lwp
->stepping
= step
;
3268 ptrace (step
? PTRACE_SINGLESTEP
: PTRACE_CONT
, lwpid_of (lwp
),
3269 (PTRACE_TYPE_ARG3
) 0,
3270 /* Coerce to a uintptr_t first to avoid potential gcc warning
3271 of coercing an 8 byte integer to a 4 byte pointer. */
3272 (PTRACE_TYPE_ARG4
) (uintptr_t) signal
);
3274 current_inferior
= saved_inferior
;
3277 /* ESRCH from ptrace either means that the thread was already
3278 running (an error) or that it is gone (a race condition). If
3279 it's gone, we will get a notification the next time we wait,
3280 so we can ignore the error. We could differentiate these
3281 two, but it's tricky without waiting; the thread still exists
3282 as a zombie, so sending it signal 0 would succeed. So just
3287 perror_with_name ("ptrace");
/* Pairs a client resume-request list with its element count, for
   passing through find_inferior callbacks.  */
struct thread_resume_array
{
  struct thread_resume *resume;
  size_t n;
};
3297 /* This function is called once per thread. We look up the thread
3298 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3301 This algorithm is O(threads * resume elements), but resume elements
3302 is small (and will remain small at least until GDB supports thread
3305 linux_set_resume_request (struct inferior_list_entry
*entry
, void *arg
)
3307 struct lwp_info
*lwp
;
3308 struct thread_info
*thread
;
3310 struct thread_resume_array
*r
;
3312 thread
= (struct thread_info
*) entry
;
3313 lwp
= get_thread_lwp (thread
);
3316 for (ndx
= 0; ndx
< r
->n
; ndx
++)
3318 ptid_t ptid
= r
->resume
[ndx
].thread
;
3319 if (ptid_equal (ptid
, minus_one_ptid
)
3320 || ptid_equal (ptid
, entry
->id
)
3321 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3323 || (ptid_get_pid (ptid
) == pid_of (lwp
)
3324 && (ptid_is_pid (ptid
)
3325 || ptid_get_lwp (ptid
) == -1)))
3327 if (r
->resume
[ndx
].kind
== resume_stop
3328 && thread
->last_resume_kind
== resume_stop
)
3331 fprintf (stderr
, "already %s LWP %ld at GDB's request\n",
3332 thread
->last_status
.kind
== TARGET_WAITKIND_STOPPED
3340 lwp
->resume
= &r
->resume
[ndx
];
3341 thread
->last_resume_kind
= lwp
->resume
->kind
;
3343 lwp
->step_range_start
= lwp
->resume
->step_range_start
;
3344 lwp
->step_range_end
= lwp
->resume
->step_range_end
;
3346 /* If we had a deferred signal to report, dequeue one now.
3347 This can happen if LWP gets more than one signal while
3348 trying to get out of a jump pad. */
3350 && !lwp
->status_pending_p
3351 && dequeue_one_deferred_signal (lwp
, &lwp
->status_pending
))
3353 lwp
->status_pending_p
= 1;
3357 "Dequeueing deferred signal %d for LWP %ld, "
3358 "leaving status pending.\n",
3359 WSTOPSIG (lwp
->status_pending
), lwpid_of (lwp
));
3366 /* No resume action for this thread. */
3373 /* Set *FLAG_P if this lwp has an interesting status pending. */
3375 resume_status_pending_p (struct inferior_list_entry
*entry
, void *flag_p
)
3377 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
3379 /* LWPs which will not be resumed are not interesting, because
3380 we might not wait for them next time through linux_wait. */
3381 if (lwp
->resume
== NULL
)
3384 if (lwp
->status_pending_p
)
3385 * (int *) flag_p
= 1;
3390 /* Return 1 if this lwp that GDB wants running is stopped at an
3391 internal breakpoint that we need to step over. It assumes that any
3392 required STOP_PC adjustment has already been propagated to the
3393 inferior's regcache. */
3396 need_step_over_p (struct inferior_list_entry
*entry
, void *dummy
)
3398 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
3399 struct thread_info
*thread
;
3400 struct thread_info
*saved_inferior
;
3403 /* LWPs which will not be resumed are not interesting, because we
3404 might not wait for them next time through linux_wait. */
3410 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3415 thread
= get_lwp_thread (lwp
);
3417 if (thread
->last_resume_kind
== resume_stop
)
3421 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3426 gdb_assert (lwp
->suspended
>= 0);
3432 "Need step over [LWP %ld]? Ignoring, suspended\n",
3437 if (!lwp
->need_step_over
)
3441 "Need step over [LWP %ld]? No\n", lwpid_of (lwp
));
3444 if (lwp
->status_pending_p
)
3448 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3453 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3457 /* If the PC has changed since we stopped, then don't do anything,
3458 and let the breakpoint/tracepoint be hit. This happens if, for
3459 instance, GDB handled the decr_pc_after_break subtraction itself,
3460 GDB is OOL stepping this thread, or the user has issued a "jump"
3461 command, or poked thread's registers herself. */
3462 if (pc
!= lwp
->stop_pc
)
3466 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3467 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3468 lwpid_of (lwp
), paddress (lwp
->stop_pc
), paddress (pc
));
3470 lwp
->need_step_over
= 0;
3474 saved_inferior
= current_inferior
;
3475 current_inferior
= thread
;
3477 /* We can only step over breakpoints we know about. */
3478 if (breakpoint_here (pc
) || fast_tracepoint_jump_here (pc
))
3480 /* Don't step over a breakpoint that GDB expects to hit
3481 though. If the condition is being evaluated on the target's side
3482 and it evaluate to false, step over this breakpoint as well. */
3483 if (gdb_breakpoint_here (pc
)
3484 && gdb_condition_true_at_breakpoint (pc
)
3485 && gdb_no_commands_at_breakpoint (pc
))
3489 "Need step over [LWP %ld]? yes, but found"
3490 " GDB breakpoint at 0x%s; skipping step over\n",
3491 lwpid_of (lwp
), paddress (pc
));
3493 current_inferior
= saved_inferior
;
3500 "Need step over [LWP %ld]? yes, "
3501 "found breakpoint at 0x%s\n",
3502 lwpid_of (lwp
), paddress (pc
));
3504 /* We've found an lwp that needs stepping over --- return 1 so
3505 that find_inferior stops looking. */
3506 current_inferior
= saved_inferior
;
3508 /* If the step over is cancelled, this is set again. */
3509 lwp
->need_step_over
= 0;
3514 current_inferior
= saved_inferior
;
3518 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3519 lwpid_of (lwp
), paddress (pc
));
3524 /* Start a step-over operation on LWP. When LWP stopped at a
3525 breakpoint, to make progress, we need to remove the breakpoint out
3526 of the way. If we let other threads run while we do that, they may
3527 pass by the breakpoint location and miss hitting it. To avoid
3528 that, a step-over momentarily stops all threads while LWP is
3529 single-stepped while the breakpoint is temporarily uninserted from
3530 the inferior. When the single-step finishes, we reinsert the
3531 breakpoint, and let all threads that are supposed to be running,
3534 On targets that don't support hardware single-step, we don't
3535 currently support full software single-stepping. Instead, we only
3536 support stepping over the thread event breakpoint, by asking the
3537 low target where to place a reinsert breakpoint. Since this
3538 routine assumes the breakpoint being stepped over is a thread event
3539 breakpoint, it usually assumes the return address of the current
3540 function is a good enough place to set the reinsert breakpoint. */
3543 start_step_over (struct lwp_info
*lwp
)
3545 struct thread_info
*saved_inferior
;
3551 "Starting step-over on LWP %ld. Stopping all threads\n",
3554 stop_all_lwps (1, lwp
);
3555 gdb_assert (lwp
->suspended
== 0);
3558 fprintf (stderr
, "Done stopping all threads for step-over.\n");
3560 /* Note, we should always reach here with an already adjusted PC,
3561 either by GDB (if we're resuming due to GDB's request), or by our
3562 caller, if we just finished handling an internal breakpoint GDB
3563 shouldn't care about. */
3566 saved_inferior
= current_inferior
;
3567 current_inferior
= get_lwp_thread (lwp
);
3569 lwp
->bp_reinsert
= pc
;
3570 uninsert_breakpoints_at (pc
);
3571 uninsert_fast_tracepoint_jumps_at (pc
);
3573 if (can_hardware_single_step ())
3579 CORE_ADDR raddr
= (*the_low_target
.breakpoint_reinsert_addr
) ();
3580 set_reinsert_breakpoint (raddr
);
3584 current_inferior
= saved_inferior
;
3586 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
3588 /* Require next event from this LWP. */
3589 step_over_bkpt
= lwp
->head
.id
;
3593 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3594 start_step_over, if still there, and delete any reinsert
3595 breakpoints we've set, on non hardware single-step targets. */
3598 finish_step_over (struct lwp_info
*lwp
)
3600 if (lwp
->bp_reinsert
!= 0)
3603 fprintf (stderr
, "Finished step over.\n");
3605 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3606 may be no breakpoint to reinsert there by now. */
3607 reinsert_breakpoints_at (lwp
->bp_reinsert
);
3608 reinsert_fast_tracepoint_jumps_at (lwp
->bp_reinsert
);
3610 lwp
->bp_reinsert
= 0;
3612 /* Delete any software-single-step reinsert breakpoints. No
3613 longer needed. We don't have to worry about other threads
3614 hitting this trap, and later not being able to explain it,
3615 because we were stepping over a breakpoint, and we hold all
3616 threads but LWP stopped while doing that. */
3617 if (!can_hardware_single_step ())
3618 delete_reinsert_breakpoints ();
3620 step_over_bkpt
= null_ptid
;
3627 /* This function is called once per thread. We check the thread's resume
3628 request, which will tell us whether to resume, step, or leave the thread
3629 stopped; and what signal, if any, it should be sent.
3631 For threads which we aren't explicitly told otherwise, we preserve
3632 the stepping flag; this is used for stepping over gdbserver-placed
3635 If pending_flags was set in any thread, we queue any needed
3636 signals, since we won't actually resume. We already have a pending
3637 event to report, so we don't need to preserve any step requests;
3638 they should be re-issued if necessary. */
3641 linux_resume_one_thread (struct inferior_list_entry
*entry
, void *arg
)
3643 struct lwp_info
*lwp
;
3644 struct thread_info
*thread
;
3646 int leave_all_stopped
= * (int *) arg
;
3649 thread
= (struct thread_info
*) entry
;
3650 lwp
= get_thread_lwp (thread
);
3652 if (lwp
->resume
== NULL
)
3655 if (lwp
->resume
->kind
== resume_stop
)
3658 fprintf (stderr
, "resume_stop request for LWP %ld\n", lwpid_of (lwp
));
3663 fprintf (stderr
, "stopping LWP %ld\n", lwpid_of (lwp
));
3665 /* Stop the thread, and wait for the event asynchronously,
3666 through the event loop. */
3672 fprintf (stderr
, "already stopped LWP %ld\n",
3675 /* The LWP may have been stopped in an internal event that
3676 was not meant to be notified back to GDB (e.g., gdbserver
3677 breakpoint), so we should be reporting a stop event in
3680 /* If the thread already has a pending SIGSTOP, this is a
3681 no-op. Otherwise, something later will presumably resume
3682 the thread and this will cause it to cancel any pending
3683 operation, due to last_resume_kind == resume_stop. If
3684 the thread already has a pending status to report, we
3685 will still report it the next time we wait - see
3686 status_pending_p_callback. */
3688 /* If we already have a pending signal to report, then
3689 there's no need to queue a SIGSTOP, as this means we're
3690 midway through moving the LWP out of the jumppad, and we
3691 will report the pending signal as soon as that is
3693 if (lwp
->pending_signals_to_report
== NULL
)
3697 /* For stop requests, we're done. */
3699 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
3703 /* If this thread which is about to be resumed has a pending status,
3704 then don't resume any threads - we can just report the pending
3705 status. Make sure to queue any signals that would otherwise be
3706 sent. In all-stop mode, we do this decision based on if *any*
3707 thread has a pending status. If there's a thread that needs the
3708 step-over-breakpoint dance, then don't resume any other thread
3709 but that particular one. */
3710 leave_pending
= (lwp
->status_pending_p
|| leave_all_stopped
);
3715 fprintf (stderr
, "resuming LWP %ld\n", lwpid_of (lwp
));
3717 step
= (lwp
->resume
->kind
== resume_step
);
3718 linux_resume_one_lwp (lwp
, step
, lwp
->resume
->sig
, NULL
);
3723 fprintf (stderr
, "leaving LWP %ld stopped\n", lwpid_of (lwp
));
3725 /* If we have a new signal, enqueue the signal. */
3726 if (lwp
->resume
->sig
!= 0)
3728 struct pending_signals
*p_sig
;
3729 p_sig
= xmalloc (sizeof (*p_sig
));
3730 p_sig
->prev
= lwp
->pending_signals
;
3731 p_sig
->signal
= lwp
->resume
->sig
;
3732 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
3734 /* If this is the same signal we were previously stopped by,
3735 make sure to queue its siginfo. We can ignore the return
3736 value of ptrace; if it fails, we'll skip
3737 PTRACE_SETSIGINFO. */
3738 if (WIFSTOPPED (lwp
->last_status
)
3739 && WSTOPSIG (lwp
->last_status
) == lwp
->resume
->sig
)
3740 ptrace (PTRACE_GETSIGINFO
, lwpid_of (lwp
), (PTRACE_TYPE_ARG3
) 0,
3743 lwp
->pending_signals
= p_sig
;
3747 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
3753 linux_resume (struct thread_resume
*resume_info
, size_t n
)
3755 struct thread_resume_array array
= { resume_info
, n
};
3756 struct lwp_info
*need_step_over
= NULL
;
3758 int leave_all_stopped
;
3760 find_inferior (&all_threads
, linux_set_resume_request
, &array
);
3762 /* If there is a thread which would otherwise be resumed, which has
3763 a pending status, then don't resume any threads - we can just
3764 report the pending status. Make sure to queue any signals that
3765 would otherwise be sent. In non-stop mode, we'll apply this
3766 logic to each thread individually. We consume all pending events
3767 before considering to start a step-over (in all-stop). */
3770 find_inferior (&all_lwps
, resume_status_pending_p
, &any_pending
);
3772 /* If there is a thread which would otherwise be resumed, which is
3773 stopped at a breakpoint that needs stepping over, then don't
3774 resume any threads - have it step over the breakpoint with all
3775 other threads stopped, then resume all threads again. Make sure
3776 to queue any signals that would otherwise be delivered or
3778 if (!any_pending
&& supports_breakpoints ())
3780 = (struct lwp_info
*) find_inferior (&all_lwps
,
3781 need_step_over_p
, NULL
);
3783 leave_all_stopped
= (need_step_over
!= NULL
|| any_pending
);
3787 if (need_step_over
!= NULL
)
3788 fprintf (stderr
, "Not resuming all, need step over\n");
3789 else if (any_pending
)
3791 "Not resuming, all-stop and found "
3792 "an LWP with pending status\n");
3794 fprintf (stderr
, "Resuming, no pending status or step over needed\n");
3797 /* Even if we're leaving threads stopped, queue all signals we'd
3798 otherwise deliver. */
3799 find_inferior (&all_threads
, linux_resume_one_thread
, &leave_all_stopped
);
3802 start_step_over (need_step_over
);
3805 /* This function is called once per thread. We check the thread's
3806 last resume request, which will tell us whether to resume, step, or
3807 leave the thread stopped. Any signal the client requested to be
3808 delivered has already been enqueued at this point.
3810 If any thread that GDB wants running is stopped at an internal
3811 breakpoint that needs stepping over, we start a step-over operation
3812 on that particular thread, and leave all others stopped. */
3815 proceed_one_lwp (struct inferior_list_entry
*entry
, void *except
)
3817 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
3818 struct thread_info
*thread
;
3826 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp
));
3831 fprintf (stderr
, " LWP %ld already running\n", lwpid_of (lwp
));
3835 thread
= get_lwp_thread (lwp
);
3837 if (thread
->last_resume_kind
== resume_stop
3838 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
3841 fprintf (stderr
, " client wants LWP to remain %ld stopped\n",
3846 if (lwp
->status_pending_p
)
3849 fprintf (stderr
, " LWP %ld has pending status, leaving stopped\n",
3854 gdb_assert (lwp
->suspended
>= 0);
3859 fprintf (stderr
, " LWP %ld is suspended\n", lwpid_of (lwp
));
3863 if (thread
->last_resume_kind
== resume_stop
3864 && lwp
->pending_signals_to_report
== NULL
3865 && lwp
->collecting_fast_tracepoint
== 0)
3867 /* We haven't reported this LWP as stopped yet (otherwise, the
3868 last_status.kind check above would catch it, and we wouldn't
3869 reach here. This LWP may have been momentarily paused by a
3870 stop_all_lwps call while handling for example, another LWP's
3871 step-over. In that case, the pending expected SIGSTOP signal
3872 that was queued at vCont;t handling time will have already
3873 been consumed by wait_for_sigstop, and so we need to requeue
3874 another one here. Note that if the LWP already has a SIGSTOP
3875 pending, this is a no-op. */
3879 "Client wants LWP %ld to stop. "
3880 "Making sure it has a SIGSTOP pending\n",
3886 step
= thread
->last_resume_kind
== resume_step
;
3887 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
3892 unsuspend_and_proceed_one_lwp (struct inferior_list_entry
*entry
, void *except
)
3894 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
3900 gdb_assert (lwp
->suspended
>= 0);
3902 return proceed_one_lwp (entry
, except
);
3905 /* When we finish a step-over, set threads running again. If there's
3906 another thread that may need a step-over, now's the time to start
3907 it. Eventually, we'll move all threads past their breakpoints. */
3910 proceed_all_lwps (void)
3912 struct lwp_info
*need_step_over
;
3914 /* If there is a thread which would otherwise be resumed, which is
3915 stopped at a breakpoint that needs stepping over, then don't
3916 resume any threads - have it step over the breakpoint with all
3917 other threads stopped, then resume all threads again. */
3919 if (supports_breakpoints ())
3922 = (struct lwp_info
*) find_inferior (&all_lwps
,
3923 need_step_over_p
, NULL
);
3925 if (need_step_over
!= NULL
)
3928 fprintf (stderr
, "proceed_all_lwps: found "
3929 "thread %ld needing a step-over\n",
3930 lwpid_of (need_step_over
));
3932 start_step_over (need_step_over
);
3938 fprintf (stderr
, "Proceeding, no step-over needed\n");
3940 find_inferior (&all_lwps
, proceed_one_lwp
, NULL
);
3943 /* Stopped LWPs that the client wanted to be running, that don't have
3944 pending statuses, are set to run again, except for EXCEPT, if not
3945 NULL. This undoes a stop_all_lwps call. */
3948 unstop_all_lwps (int unsuspend
, struct lwp_info
*except
)
3954 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except
));
3957 "unstopping all lwps\n");
3961 find_inferior (&all_lwps
, unsuspend_and_proceed_one_lwp
, except
);
3963 find_inferior (&all_lwps
, proceed_one_lwp
, except
);
3967 #ifdef HAVE_LINUX_REGSETS
3969 #define use_linux_regsets 1
3971 /* Returns true if REGSET has been disabled. */
3974 regset_disabled (struct regsets_info
*info
, struct regset_info
*regset
)
3976 return (info
->disabled_regsets
!= NULL
3977 && info
->disabled_regsets
[regset
- info
->regsets
]);
3980 /* Disable REGSET. */
3983 disable_regset (struct regsets_info
*info
, struct regset_info
*regset
)
3987 dr_offset
= regset
- info
->regsets
;
3988 if (info
->disabled_regsets
== NULL
)
3989 info
->disabled_regsets
= xcalloc (1, info
->num_regsets
);
3990 info
->disabled_regsets
[dr_offset
] = 1;
3994 regsets_fetch_inferior_registers (struct regsets_info
*regsets_info
,
3995 struct regcache
*regcache
)
3997 struct regset_info
*regset
;
3998 int saw_general_regs
= 0;
4002 regset
= regsets_info
->regsets
;
4004 pid
= lwpid_of (get_thread_lwp (current_inferior
));
4005 while (regset
->size
>= 0)
4010 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
))
4016 buf
= xmalloc (regset
->size
);
4018 nt_type
= regset
->nt_type
;
4022 iov
.iov_len
= regset
->size
;
4023 data
= (void *) &iov
;
4029 res
= ptrace (regset
->get_request
, pid
,
4030 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4032 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
4038 /* If we get EIO on a regset, do not try it again for
4039 this process mode. */
4040 disable_regset (regsets_info
, regset
);
4047 sprintf (s
, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4052 else if (regset
->type
== GENERAL_REGS
)
4053 saw_general_regs
= 1;
4054 regset
->store_function (regcache
, buf
);
4058 if (saw_general_regs
)
4065 regsets_store_inferior_registers (struct regsets_info
*regsets_info
,
4066 struct regcache
*regcache
)
4068 struct regset_info
*regset
;
4069 int saw_general_regs
= 0;
4073 regset
= regsets_info
->regsets
;
4075 pid
= lwpid_of (get_thread_lwp (current_inferior
));
4076 while (regset
->size
>= 0)
4081 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
))
4087 buf
= xmalloc (regset
->size
);
4089 /* First fill the buffer with the current register set contents,
4090 in case there are any items in the kernel's regset that are
4091 not in gdbserver's regcache. */
4093 nt_type
= regset
->nt_type
;
4097 iov
.iov_len
= regset
->size
;
4098 data
= (void *) &iov
;
4104 res
= ptrace (regset
->get_request
, pid
,
4105 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4107 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
4112 /* Then overlay our cached registers on that. */
4113 regset
->fill_function (regcache
, buf
);
4115 /* Only now do we write the register set. */
4117 res
= ptrace (regset
->set_request
, pid
,
4118 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4120 res
= ptrace (regset
->set_request
, pid
, data
, nt_type
);
4128 /* If we get EIO on a regset, do not try it again for
4129 this process mode. */
4130 disable_regset (regsets_info
, regset
);
4134 else if (errno
== ESRCH
)
4136 /* At this point, ESRCH should mean the process is
4137 already gone, in which case we simply ignore attempts
4138 to change its registers. See also the related
4139 comment in linux_resume_one_lwp. */
4145 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4148 else if (regset
->type
== GENERAL_REGS
)
4149 saw_general_regs
= 1;
4153 if (saw_general_regs
)
4159 #else /* !HAVE_LINUX_REGSETS */
4161 #define use_linux_regsets 0
4162 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4163 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4167 /* Return 1 if register REGNO is supported by one of the regset ptrace
4168 calls or 0 if it has to be transferred individually. */
4171 linux_register_in_regsets (const struct regs_info
*regs_info
, int regno
)
4173 unsigned char mask
= 1 << (regno
% 8);
4174 size_t index
= regno
/ 8;
4176 return (use_linux_regsets
4177 && (regs_info
->regset_bitmap
== NULL
4178 || (regs_info
->regset_bitmap
[index
] & mask
) != 0));
4181 #ifdef HAVE_LINUX_USRREGS
4184 register_addr (const struct usrregs_info
*usrregs
, int regnum
)
4188 if (regnum
< 0 || regnum
>= usrregs
->num_regs
)
4189 error ("Invalid register number %d.", regnum
);
4191 addr
= usrregs
->regmap
[regnum
];
4196 /* Fetch one register. */
4198 fetch_register (const struct usrregs_info
*usrregs
,
4199 struct regcache
*regcache
, int regno
)
4206 if (regno
>= usrregs
->num_regs
)
4208 if ((*the_low_target
.cannot_fetch_register
) (regno
))
4211 regaddr
= register_addr (usrregs
, regno
);
4215 size
= ((register_size (regcache
->tdesc
, regno
)
4216 + sizeof (PTRACE_XFER_TYPE
) - 1)
4217 & -sizeof (PTRACE_XFER_TYPE
));
4218 buf
= alloca (size
);
4220 pid
= lwpid_of (get_thread_lwp (current_inferior
));
4221 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
4224 *(PTRACE_XFER_TYPE
*) (buf
+ i
) =
4225 ptrace (PTRACE_PEEKUSER
, pid
,
4226 /* Coerce to a uintptr_t first to avoid potential gcc warning
4227 of coercing an 8 byte integer to a 4 byte pointer. */
4228 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
, (PTRACE_TYPE_ARG4
) 0);
4229 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
4231 error ("reading register %d: %s", regno
, strerror (errno
));
4234 if (the_low_target
.supply_ptrace_register
)
4235 the_low_target
.supply_ptrace_register (regcache
, regno
, buf
);
4237 supply_register (regcache
, regno
, buf
);
4240 /* Store one register. */
4242 store_register (const struct usrregs_info
*usrregs
,
4243 struct regcache
*regcache
, int regno
)
4250 if (regno
>= usrregs
->num_regs
)
4252 if ((*the_low_target
.cannot_store_register
) (regno
))
4255 regaddr
= register_addr (usrregs
, regno
);
4259 size
= ((register_size (regcache
->tdesc
, regno
)
4260 + sizeof (PTRACE_XFER_TYPE
) - 1)
4261 & -sizeof (PTRACE_XFER_TYPE
));
4262 buf
= alloca (size
);
4263 memset (buf
, 0, size
);
4265 if (the_low_target
.collect_ptrace_register
)
4266 the_low_target
.collect_ptrace_register (regcache
, regno
, buf
);
4268 collect_register (regcache
, regno
, buf
);
4270 pid
= lwpid_of (get_thread_lwp (current_inferior
));
4271 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
4274 ptrace (PTRACE_POKEUSER
, pid
,
4275 /* Coerce to a uintptr_t first to avoid potential gcc warning
4276 about coercing an 8 byte integer to a 4 byte pointer. */
4277 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
,
4278 (PTRACE_TYPE_ARG4
) *(PTRACE_XFER_TYPE
*) (buf
+ i
));
4281 /* At this point, ESRCH should mean the process is
4282 already gone, in which case we simply ignore attempts
4283 to change its registers. See also the related
4284 comment in linux_resume_one_lwp. */
4288 if ((*the_low_target
.cannot_store_register
) (regno
) == 0)
4289 error ("writing register %d: %s", regno
, strerror (errno
));
4291 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
4295 /* Fetch all registers, or just one, from the child process.
4296 If REGNO is -1, do this for all registers, skipping any that are
4297 assumed to have been retrieved by regsets_fetch_inferior_registers,
4298 unless ALL is non-zero.
4299 Otherwise, REGNO specifies which register (so we can save time). */
4301 usr_fetch_inferior_registers (const struct regs_info
*regs_info
,
4302 struct regcache
*regcache
, int regno
, int all
)
4304 struct usrregs_info
*usr
= regs_info
->usrregs
;
4308 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
4309 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
4310 fetch_register (usr
, regcache
, regno
);
4313 fetch_register (usr
, regcache
, regno
);
4316 /* Store our register values back into the inferior.
4317 If REGNO is -1, do this for all registers, skipping any that are
4318 assumed to have been saved by regsets_store_inferior_registers,
4319 unless ALL is non-zero.
4320 Otherwise, REGNO specifies which register (so we can save time). */
4322 usr_store_inferior_registers (const struct regs_info
*regs_info
,
4323 struct regcache
*regcache
, int regno
, int all
)
4325 struct usrregs_info
*usr
= regs_info
->usrregs
;
4329 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
4330 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
4331 store_register (usr
, regcache
, regno
);
4334 store_register (usr
, regcache
, regno
);
4337 #else /* !HAVE_LINUX_USRREGS */
4339 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4340 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4346 linux_fetch_registers (struct regcache
*regcache
, int regno
)
4350 const struct regs_info
*regs_info
= (*the_low_target
.regs_info
) ();
4354 if (the_low_target
.fetch_register
!= NULL
4355 && regs_info
->usrregs
!= NULL
)
4356 for (regno
= 0; regno
< regs_info
->usrregs
->num_regs
; regno
++)
4357 (*the_low_target
.fetch_register
) (regcache
, regno
);
4359 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
, regcache
);
4360 if (regs_info
->usrregs
!= NULL
)
4361 usr_fetch_inferior_registers (regs_info
, regcache
, -1, all
);
4365 if (the_low_target
.fetch_register
!= NULL
4366 && (*the_low_target
.fetch_register
) (regcache
, regno
))
4369 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
4371 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
,
4373 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
4374 usr_fetch_inferior_registers (regs_info
, regcache
, regno
, 1);
4379 linux_store_registers (struct regcache
*regcache
, int regno
)
4383 const struct regs_info
*regs_info
= (*the_low_target
.regs_info
) ();
4387 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
4389 if (regs_info
->usrregs
!= NULL
)
4390 usr_store_inferior_registers (regs_info
, regcache
, regno
, all
);
4394 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
4396 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
4398 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
4399 usr_store_inferior_registers (regs_info
, regcache
, regno
, 1);
4404 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4405 to debugger memory starting at MYADDR. */
4408 linux_read_memory (CORE_ADDR memaddr
, unsigned char *myaddr
, int len
)
4410 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
4411 register PTRACE_XFER_TYPE
*buffer
;
4412 register CORE_ADDR addr
;
4419 /* Try using /proc. Don't bother for one word. */
4420 if (len
>= 3 * sizeof (long))
4424 /* We could keep this file open and cache it - possibly one per
4425 thread. That requires some juggling, but is even faster. */
4426 sprintf (filename
, "/proc/%d/mem", pid
);
4427 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
4431 /* If pread64 is available, use it. It's faster if the kernel
4432 supports it (only one syscall), and it's 64-bit safe even on
4433 32-bit platforms (for instance, SPARC debugging a SPARC64
4436 bytes
= pread64 (fd
, myaddr
, len
, memaddr
);
4439 if (lseek (fd
, memaddr
, SEEK_SET
) != -1)
4440 bytes
= read (fd
, myaddr
, len
);
4447 /* Some data was read, we'll try to get the rest with ptrace. */
4457 /* Round starting address down to longword boundary. */
4458 addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
4459 /* Round ending address up; get number of longwords that makes. */
4460 count
= ((((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
4461 / sizeof (PTRACE_XFER_TYPE
));
4462 /* Allocate buffer of that many longwords. */
4463 buffer
= (PTRACE_XFER_TYPE
*) alloca (count
* sizeof (PTRACE_XFER_TYPE
));
4465 /* Read all the longwords */
4467 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
4469 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4470 about coercing an 8 byte integer to a 4 byte pointer. */
4471 buffer
[i
] = ptrace (PTRACE_PEEKTEXT
, pid
,
4472 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
4473 (PTRACE_TYPE_ARG4
) 0);
4479 /* Copy appropriate bytes out of the buffer. */
4482 i
*= sizeof (PTRACE_XFER_TYPE
);
4483 i
-= memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1);
4485 (char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
4492 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4493 memory at MEMADDR. On failure (cannot write to the inferior)
4494 returns the value of errno. Always succeeds if LEN is zero. */
4497 linux_write_memory (CORE_ADDR memaddr
, const unsigned char *myaddr
, int len
)
4500 /* Round starting address down to longword boundary. */
4501 register CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
4502 /* Round ending address up; get number of longwords that makes. */
4504 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
4505 / sizeof (PTRACE_XFER_TYPE
);
4507 /* Allocate buffer of that many longwords. */
4508 register PTRACE_XFER_TYPE
*buffer
= (PTRACE_XFER_TYPE
*)
4509 alloca (count
* sizeof (PTRACE_XFER_TYPE
));
4511 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
4515 /* Zero length write always succeeds. */
4521 /* Dump up to four bytes. */
4522 unsigned int val
= * (unsigned int *) myaddr
;
4528 val
= val
& 0xffffff;
4529 fprintf (stderr
, "Writing %0*x to 0x%08lx\n", 2 * ((len
< 4) ? len
: 4),
4530 val
, (long)memaddr
);
4533 /* Fill start and end extra bytes of buffer with existing memory data. */
4536 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4537 about coercing an 8 byte integer to a 4 byte pointer. */
4538 buffer
[0] = ptrace (PTRACE_PEEKTEXT
, pid
,
4539 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
4540 (PTRACE_TYPE_ARG4
) 0);
4548 = ptrace (PTRACE_PEEKTEXT
, pid
,
4549 /* Coerce to a uintptr_t first to avoid potential gcc warning
4550 about coercing an 8 byte integer to a 4 byte pointer. */
4551 (PTRACE_TYPE_ARG3
) (uintptr_t) (addr
+ (count
- 1)
4552 * sizeof (PTRACE_XFER_TYPE
)),
4553 (PTRACE_TYPE_ARG4
) 0);
4558 /* Copy data to be written over corresponding part of buffer. */
4560 memcpy ((char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
4563 /* Write the entire buffer. */
4565 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
4568 ptrace (PTRACE_POKETEXT
, pid
,
4569 /* Coerce to a uintptr_t first to avoid potential gcc warning
4570 about coercing an 8 byte integer to a 4 byte pointer. */
4571 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
4572 (PTRACE_TYPE_ARG4
) buffer
[i
]);
/* Target op: initialize thread_db support for the current process, if
   it hasn't been initialized already.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing clones, then we don't need to
     use the magic thread event breakpoint to learn about
     threads.  */
  thread_db_init (!linux_supports_traceclone ());
#endif
}
4597 linux_request_interrupt (void)
4599 extern unsigned long signal_pid
;
4601 if (!ptid_equal (cont_thread
, null_ptid
)
4602 && !ptid_equal (cont_thread
, minus_one_ptid
))
4604 struct lwp_info
*lwp
;
4607 lwp
= get_thread_lwp (current_inferior
);
4608 lwpid
= lwpid_of (lwp
);
4609 kill_lwp (lwpid
, SIGINT
);
4612 kill_lwp (signal_pid
, SIGINT
);
4615 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4616 to debugger memory starting at MYADDR. */
4619 linux_read_auxv (CORE_ADDR offset
, unsigned char *myaddr
, unsigned int len
)
4621 char filename
[PATH_MAX
];
4623 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
4625 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
4627 fd
= open (filename
, O_RDONLY
);
4631 if (offset
!= (CORE_ADDR
) 0
4632 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
4635 n
= read (fd
, myaddr
, len
);
4642 /* These breakpoint and watchpoint related wrapper functions simply
4643 pass on the function call if the target has registered a
4644 corresponding function. */
4647 linux_insert_point (char type
, CORE_ADDR addr
, int len
)
4649 if (the_low_target
.insert_point
!= NULL
)
4650 return the_low_target
.insert_point (type
, addr
, len
);
4652 /* Unsupported (see target.h). */
4657 linux_remove_point (char type
, CORE_ADDR addr
, int len
)
4659 if (the_low_target
.remove_point
!= NULL
)
4660 return the_low_target
.remove_point (type
, addr
, len
);
4662 /* Unsupported (see target.h). */
4667 linux_stopped_by_watchpoint (void)
4669 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
4671 return lwp
->stopped_by_watchpoint
;
4675 linux_stopped_data_address (void)
4677 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
4679 return lwp
->stopped_data_address
;
4682 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4683 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4684 && defined(PT_TEXT_END_ADDR)
4686 /* This is only used for targets that define PT_TEXT_ADDR,
4687 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4688 the target has different ways of acquiring this information, like
4691 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4692 to tell gdb about. */
4695 linux_read_offsets (CORE_ADDR
*text_p
, CORE_ADDR
*data_p
)
4697 unsigned long text
, text_end
, data
;
4698 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
4702 text
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_ADDR
,
4703 (PTRACE_TYPE_ARG4
) 0);
4704 text_end
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_END_ADDR
,
4705 (PTRACE_TYPE_ARG4
) 0);
4706 data
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_DATA_ADDR
,
4707 (PTRACE_TYPE_ARG4
) 0);
4711 /* Both text and data offsets produced at compile-time (and so
4712 used by gdb) are relative to the beginning of the program,
4713 with the data segment immediately following the text segment.
4714 However, the actual runtime layout in memory may put the data
4715 somewhere else, so when we send gdb a data base-address, we
4716 use the real data base address and subtract the compile-time
4717 data base-address from it (which is just the length of the
4718 text segment). BSS immediately follows data in both
4721 *data_p
= data
- (text_end
- text
);
4730 linux_qxfer_osdata (const char *annex
,
4731 unsigned char *readbuf
, unsigned const char *writebuf
,
4732 CORE_ADDR offset
, int len
)
4734 return linux_common_xfer_osdata (annex
, readbuf
, offset
, len
);
4737 /* Convert a native/host siginfo object, into/from the siginfo in the
4738 layout of the inferiors' architecture. */
4741 siginfo_fixup (siginfo_t
*siginfo
, void *inf_siginfo
, int direction
)
4745 if (the_low_target
.siginfo_fixup
!= NULL
)
4746 done
= the_low_target
.siginfo_fixup (siginfo
, inf_siginfo
, direction
);
4748 /* If there was no callback, or the callback didn't do anything,
4749 then just do a straight memcpy. */
4753 memcpy (siginfo
, inf_siginfo
, sizeof (siginfo_t
));
4755 memcpy (inf_siginfo
, siginfo
, sizeof (siginfo_t
));
4760 linux_xfer_siginfo (const char *annex
, unsigned char *readbuf
,
4761 unsigned const char *writebuf
, CORE_ADDR offset
, int len
)
4765 char inf_siginfo
[sizeof (siginfo_t
)];
4767 if (current_inferior
== NULL
)
4770 pid
= lwpid_of (get_thread_lwp (current_inferior
));
4773 fprintf (stderr
, "%s siginfo for lwp %d.\n",
4774 readbuf
!= NULL
? "Reading" : "Writing",
4777 if (offset
>= sizeof (siginfo
))
4780 if (ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
4783 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4784 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4785 inferior with a 64-bit GDBSERVER should look the same as debugging it
4786 with a 32-bit GDBSERVER, we need to convert it. */
4787 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
4789 if (offset
+ len
> sizeof (siginfo
))
4790 len
= sizeof (siginfo
) - offset
;
4792 if (readbuf
!= NULL
)
4793 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
4796 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
4798 /* Convert back to ptrace layout before flushing it out. */
4799 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
4801 if (ptrace (PTRACE_SETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
4808 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4809 so we notice when children change state; as the handler for the
4810 sigsuspend in my_waitpid. */
4813 sigchld_handler (int signo
)
4815 int old_errno
= errno
;
4821 /* fprintf is not async-signal-safe, so call write
4823 if (write (2, "sigchld_handler\n",
4824 sizeof ("sigchld_handler\n") - 1) < 0)
4825 break; /* just ignore */
4829 if (target_is_async_p ())
4830 async_file_mark (); /* trigger a linux_wait */
/* Target op: non-stop mode is supported on Linux.  */

static int
linux_supports_non_stop (void)
{
  return 1;
}
4842 linux_async (int enable
)
4844 int previous
= (linux_event_pipe
[0] != -1);
4847 fprintf (stderr
, "linux_async (%d), previous=%d\n",
4850 if (previous
!= enable
)
4853 sigemptyset (&mask
);
4854 sigaddset (&mask
, SIGCHLD
);
4856 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
4860 if (pipe (linux_event_pipe
) == -1)
4861 fatal ("creating event pipe failed.");
4863 fcntl (linux_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
4864 fcntl (linux_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
4866 /* Register the event loop handler. */
4867 add_file_handler (linux_event_pipe
[0],
4868 handle_target_event
, NULL
);
4870 /* Always trigger a linux_wait. */
4875 delete_file_handler (linux_event_pipe
[0]);
4877 close (linux_event_pipe
[0]);
4878 close (linux_event_pipe
[1]);
4879 linux_event_pipe
[0] = -1;
4880 linux_event_pipe
[1] = -1;
4883 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
/* Target op: switch non-stop mode on or off.  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}
/* Target op: multi-process debugging is supported on Linux.  */

static int
linux_supports_multi_process (void)
{
  return 1;
}
/* Target op: address-space randomization can be disabled only when
   personality(2) support was detected at configure time.  */

static int
linux_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
/* Target op: the in-process agent is supported on Linux.  */

static int
linux_supports_agent (void)
{
  return 1;
}
4920 linux_supports_range_stepping (void)
4922 if (*the_low_target
.supports_range_stepping
== NULL
)
4925 return (*the_low_target
.supports_range_stepping
) ();
4928 /* Enumerate spufs IDs for process PID. */
4930 spu_enumerate_spu_ids (long pid
, unsigned char *buf
, CORE_ADDR offset
, int len
)
4936 struct dirent
*entry
;
4938 sprintf (path
, "/proc/%ld/fd", pid
);
4939 dir
= opendir (path
);
4944 while ((entry
= readdir (dir
)) != NULL
)
4950 fd
= atoi (entry
->d_name
);
4954 sprintf (path
, "/proc/%ld/fd/%d", pid
, fd
);
4955 if (stat (path
, &st
) != 0)
4957 if (!S_ISDIR (st
.st_mode
))
4960 if (statfs (path
, &stfs
) != 0)
4962 if (stfs
.f_type
!= SPUFS_MAGIC
)
4965 if (pos
>= offset
&& pos
+ 4 <= offset
+ len
)
4967 *(unsigned int *)(buf
+ pos
- offset
) = fd
;
4977 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4978 object type, using the /proc file system. */
4980 linux_qxfer_spu (const char *annex
, unsigned char *readbuf
,
4981 unsigned const char *writebuf
,
4982 CORE_ADDR offset
, int len
)
4984 long pid
= lwpid_of (get_thread_lwp (current_inferior
));
4989 if (!writebuf
&& !readbuf
)
4997 return spu_enumerate_spu_ids (pid
, readbuf
, offset
, len
);
5000 sprintf (buf
, "/proc/%ld/fd/%s", pid
, annex
);
5001 fd
= open (buf
, writebuf
? O_WRONLY
: O_RDONLY
);
5006 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
5013 ret
= write (fd
, writebuf
, (size_t) len
);
5015 ret
= read (fd
, readbuf
, (size_t) len
);
5021 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5022 struct target_loadseg
5024 /* Core address to which the segment is mapped. */
5026 /* VMA recorded in the program header. */
5028 /* Size of this segment in memory. */
5032 # if defined PT_GETDSBT
5033 struct target_loadmap
5035 /* Protocol version number, must be zero. */
5037 /* Pointer to the DSBT table, its size, and the DSBT index. */
5038 unsigned *dsbt_table
;
5039 unsigned dsbt_size
, dsbt_index
;
5040 /* Number of segments in this map. */
5042 /* The actual memory map. */
5043 struct target_loadseg segs
[/*nsegs*/];
5045 # define LINUX_LOADMAP PT_GETDSBT
5046 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5047 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5049 struct target_loadmap
5051 /* Protocol version number, must be zero. */
5053 /* Number of segments in this map. */
5055 /* The actual memory map. */
5056 struct target_loadseg segs
[/*nsegs*/];
5058 # define LINUX_LOADMAP PTRACE_GETFDPIC
5059 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5060 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5064 linux_read_loadmap (const char *annex
, CORE_ADDR offset
,
5065 unsigned char *myaddr
, unsigned int len
)
5067 int pid
= lwpid_of (get_thread_lwp (current_inferior
));
5069 struct target_loadmap
*data
= NULL
;
5070 unsigned int actual_length
, copy_length
;
5072 if (strcmp (annex
, "exec") == 0)
5073 addr
= (int) LINUX_LOADMAP_EXEC
;
5074 else if (strcmp (annex
, "interp") == 0)
5075 addr
= (int) LINUX_LOADMAP_INTERP
;
5079 if (ptrace (LINUX_LOADMAP
, pid
, addr
, &data
) != 0)
5085 actual_length
= sizeof (struct target_loadmap
)
5086 + sizeof (struct target_loadseg
) * data
->nsegs
;
5088 if (offset
< 0 || offset
> actual_length
)
5091 copy_length
= actual_length
- offset
< len
? actual_length
- offset
: len
;
5092 memcpy (myaddr
, (char *) data
+ offset
, copy_length
);
5096 # define linux_read_loadmap NULL
5097 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5100 linux_process_qsupported (const char *query
)
5102 if (the_low_target
.process_qsupported
!= NULL
)
5103 the_low_target
.process_qsupported (query
);
5107 linux_supports_tracepoints (void)
5109 if (*the_low_target
.supports_tracepoints
== NULL
)
5112 return (*the_low_target
.supports_tracepoints
) ();
5116 linux_read_pc (struct regcache
*regcache
)
5118 if (the_low_target
.get_pc
== NULL
)
5121 return (*the_low_target
.get_pc
) (regcache
);
5125 linux_write_pc (struct regcache
*regcache
, CORE_ADDR pc
)
5127 gdb_assert (the_low_target
.set_pc
!= NULL
);
5129 (*the_low_target
.set_pc
) (regcache
, pc
);
5133 linux_thread_stopped (struct thread_info
*thread
)
5135 return get_thread_lwp (thread
)->stopped
;
/* This exposes stop-all-threads functionality to other modules.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}
/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
5156 linux_prepare_to_access_memory (void)
5158 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5161 linux_pause_all (1);
5166 linux_done_accessing_memory (void)
5168 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5171 linux_unpause_all (1);
5175 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
5176 CORE_ADDR collector
,
5179 CORE_ADDR
*jump_entry
,
5180 CORE_ADDR
*trampoline
,
5181 ULONGEST
*trampoline_size
,
5182 unsigned char *jjump_pad_insn
,
5183 ULONGEST
*jjump_pad_insn_size
,
5184 CORE_ADDR
*adjusted_insn_addr
,
5185 CORE_ADDR
*adjusted_insn_addr_end
,
5188 return (*the_low_target
.install_fast_tracepoint_jump_pad
)
5189 (tpoint
, tpaddr
, collector
, lockaddr
, orig_size
,
5190 jump_entry
, trampoline
, trampoline_size
,
5191 jjump_pad_insn
, jjump_pad_insn_size
,
5192 adjusted_insn_addr
, adjusted_insn_addr_end
,
5196 static struct emit_ops
*
5197 linux_emit_ops (void)
5199 if (the_low_target
.emit_ops
!= NULL
)
5200 return (*the_low_target
.emit_ops
) ();
5206 linux_get_min_fast_tracepoint_insn_len (void)
5208 return (*the_low_target
.get_min_fast_tracepoint_insn_len
) ();
5211 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5214 get_phdr_phnum_from_proc_auxv (const int pid
, const int is_elf64
,
5215 CORE_ADDR
*phdr_memaddr
, int *num_phdr
)
5217 char filename
[PATH_MAX
];
5219 const int auxv_size
= is_elf64
5220 ? sizeof (Elf64_auxv_t
) : sizeof (Elf32_auxv_t
);
5221 char buf
[sizeof (Elf64_auxv_t
)]; /* The larger of the two. */
5223 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
5225 fd
= open (filename
, O_RDONLY
);
5231 while (read (fd
, buf
, auxv_size
) == auxv_size
5232 && (*phdr_memaddr
== 0 || *num_phdr
== 0))
5236 Elf64_auxv_t
*const aux
= (Elf64_auxv_t
*) buf
;
5238 switch (aux
->a_type
)
5241 *phdr_memaddr
= aux
->a_un
.a_val
;
5244 *num_phdr
= aux
->a_un
.a_val
;
5250 Elf32_auxv_t
*const aux
= (Elf32_auxv_t
*) buf
;
5252 switch (aux
->a_type
)
5255 *phdr_memaddr
= aux
->a_un
.a_val
;
5258 *num_phdr
= aux
->a_un
.a_val
;
5266 if (*phdr_memaddr
== 0 || *num_phdr
== 0)
5268 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5269 "phdr_memaddr = %ld, phdr_num = %d",
5270 (long) *phdr_memaddr
, *num_phdr
);
5277 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5280 get_dynamic (const int pid
, const int is_elf64
)
5282 CORE_ADDR phdr_memaddr
, relocation
;
5284 unsigned char *phdr_buf
;
5285 const int phdr_size
= is_elf64
? sizeof (Elf64_Phdr
) : sizeof (Elf32_Phdr
);
5287 if (get_phdr_phnum_from_proc_auxv (pid
, is_elf64
, &phdr_memaddr
, &num_phdr
))
5290 gdb_assert (num_phdr
< 100); /* Basic sanity check. */
5291 phdr_buf
= alloca (num_phdr
* phdr_size
);
5293 if (linux_read_memory (phdr_memaddr
, phdr_buf
, num_phdr
* phdr_size
))
5296 /* Compute relocation: it is expected to be 0 for "regular" executables,
5297 non-zero for PIE ones. */
5299 for (i
= 0; relocation
== -1 && i
< num_phdr
; i
++)
5302 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
5304 if (p
->p_type
== PT_PHDR
)
5305 relocation
= phdr_memaddr
- p
->p_vaddr
;
5309 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
5311 if (p
->p_type
== PT_PHDR
)
5312 relocation
= phdr_memaddr
- p
->p_vaddr
;
5315 if (relocation
== -1)
5317 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
5318 any real world executables, including PIE executables, have always
5319 PT_PHDR present. PT_PHDR is not present in some shared libraries or
5320 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
5321 or present DT_DEBUG anyway (fpc binaries are statically linked).
5323 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5325 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5330 for (i
= 0; i
< num_phdr
; i
++)
5334 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
5336 if (p
->p_type
== PT_DYNAMIC
)
5337 return p
->p_vaddr
+ relocation
;
5341 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
5343 if (p
->p_type
== PT_DYNAMIC
)
5344 return p
->p_vaddr
+ relocation
;
5351 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5352 can be 0 if the inferior does not yet have the library list initialized.
5353 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5354 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5357 get_r_debug (const int pid
, const int is_elf64
)
5359 CORE_ADDR dynamic_memaddr
;
5360 const int dyn_size
= is_elf64
? sizeof (Elf64_Dyn
) : sizeof (Elf32_Dyn
);
5361 unsigned char buf
[sizeof (Elf64_Dyn
)]; /* The larger of the two. */
5364 dynamic_memaddr
= get_dynamic (pid
, is_elf64
);
5365 if (dynamic_memaddr
== 0)
5368 while (linux_read_memory (dynamic_memaddr
, buf
, dyn_size
) == 0)
5372 Elf64_Dyn
*const dyn
= (Elf64_Dyn
*) buf
;
5373 #ifdef DT_MIPS_RLD_MAP
5377 unsigned char buf
[sizeof (Elf64_Xword
)];
5381 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
5383 if (linux_read_memory (dyn
->d_un
.d_val
,
5384 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
5389 #endif /* DT_MIPS_RLD_MAP */
5391 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
5392 map
= dyn
->d_un
.d_val
;
5394 if (dyn
->d_tag
== DT_NULL
)
5399 Elf32_Dyn
*const dyn
= (Elf32_Dyn
*) buf
;
5400 #ifdef DT_MIPS_RLD_MAP
5404 unsigned char buf
[sizeof (Elf32_Word
)];
5408 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
5410 if (linux_read_memory (dyn
->d_un
.d_val
,
5411 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
5416 #endif /* DT_MIPS_RLD_MAP */
5418 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
5419 map
= dyn
->d_un
.d_val
;
5421 if (dyn
->d_tag
== DT_NULL
)
5425 dynamic_memaddr
+= dyn_size
;
5431 /* Read one pointer from MEMADDR in the inferior. */
5434 read_one_ptr (CORE_ADDR memaddr
, CORE_ADDR
*ptr
, int ptr_size
)
5438 /* Go through a union so this works on either big or little endian
5439 hosts, when the inferior's pointer size is smaller than the size
5440 of CORE_ADDR. It is assumed the inferior's endianness is the
5441 same of the superior's. */
5444 CORE_ADDR core_addr
;
5449 ret
= linux_read_memory (memaddr
, &addr
.uc
, ptr_size
);
5452 if (ptr_size
== sizeof (CORE_ADDR
))
5453 *ptr
= addr
.core_addr
;
5454 else if (ptr_size
== sizeof (unsigned int))
5457 gdb_assert_not_reached ("unhandled pointer size");
/* Field offsets inside the inferior's `struct r_debug' and
   `struct link_map', parameterized so 32-bit and 64-bit layouts can
   share one traversal routine.  */

struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;

    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;

    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;

    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;

    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;

    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;

    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
5486 /* Construct qXfer:libraries-svr4:read reply. */
5489 linux_qxfer_libraries_svr4 (const char *annex
, unsigned char *readbuf
,
5490 unsigned const char *writebuf
,
5491 CORE_ADDR offset
, int len
)
5494 unsigned document_len
;
5495 struct process_info_private
*const priv
= current_process ()->private;
5496 char filename
[PATH_MAX
];
5499 static const struct link_map_offsets lmo_32bit_offsets
=
5501 0, /* r_version offset. */
5502 4, /* r_debug.r_map offset. */
5503 0, /* l_addr offset in link_map. */
5504 4, /* l_name offset in link_map. */
5505 8, /* l_ld offset in link_map. */
5506 12, /* l_next offset in link_map. */
5507 16 /* l_prev offset in link_map. */
5510 static const struct link_map_offsets lmo_64bit_offsets
=
5512 0, /* r_version offset. */
5513 8, /* r_debug.r_map offset. */
5514 0, /* l_addr offset in link_map. */
5515 8, /* l_name offset in link_map. */
5516 16, /* l_ld offset in link_map. */
5517 24, /* l_next offset in link_map. */
5518 32 /* l_prev offset in link_map. */
5520 const struct link_map_offsets
*lmo
;
5521 unsigned int machine
;
5523 CORE_ADDR lm_addr
= 0, lm_prev
= 0;
5524 int allocated
= 1024;
5526 CORE_ADDR l_name
, l_addr
, l_ld
, l_next
, l_prev
;
5527 int header_done
= 0;
5529 if (writebuf
!= NULL
)
5531 if (readbuf
== NULL
)
5534 pid
= lwpid_of (get_thread_lwp (current_inferior
));
5535 xsnprintf (filename
, sizeof filename
, "/proc/%d/exe", pid
);
5536 is_elf64
= elf_64_file_p (filename
, &machine
);
5537 lmo
= is_elf64
? &lmo_64bit_offsets
: &lmo_32bit_offsets
;
5538 ptr_size
= is_elf64
? 8 : 4;
5540 while (annex
[0] != '\0')
5546 sep
= strchr (annex
, '=');
5551 if (len
== 5 && strncmp (annex
, "start", 5) == 0)
5553 else if (len
== 4 && strncmp (annex
, "prev", 4) == 0)
5557 annex
= strchr (sep
, ';');
5564 annex
= decode_address_to_semicolon (addrp
, sep
+ 1);
5571 if (priv
->r_debug
== 0)
5572 priv
->r_debug
= get_r_debug (pid
, is_elf64
);
5574 /* We failed to find DT_DEBUG. Such situation will not change
5575 for this inferior - do not retry it. Report it to GDB as
5576 E01, see for the reasons at the GDB solib-svr4.c side. */
5577 if (priv
->r_debug
== (CORE_ADDR
) -1)
5580 if (priv
->r_debug
!= 0)
5582 if (linux_read_memory (priv
->r_debug
+ lmo
->r_version_offset
,
5583 (unsigned char *) &r_version
,
5584 sizeof (r_version
)) != 0
5587 warning ("unexpected r_debug version %d", r_version
);
5589 else if (read_one_ptr (priv
->r_debug
+ lmo
->r_map_offset
,
5590 &lm_addr
, ptr_size
) != 0)
5592 warning ("unable to read r_map from 0x%lx",
5593 (long) priv
->r_debug
+ lmo
->r_map_offset
);
5598 document
= xmalloc (allocated
);
5599 strcpy (document
, "<library-list-svr4 version=\"1.0\"");
5600 p
= document
+ strlen (document
);
5603 && read_one_ptr (lm_addr
+ lmo
->l_name_offset
,
5604 &l_name
, ptr_size
) == 0
5605 && read_one_ptr (lm_addr
+ lmo
->l_addr_offset
,
5606 &l_addr
, ptr_size
) == 0
5607 && read_one_ptr (lm_addr
+ lmo
->l_ld_offset
,
5608 &l_ld
, ptr_size
) == 0
5609 && read_one_ptr (lm_addr
+ lmo
->l_prev_offset
,
5610 &l_prev
, ptr_size
) == 0
5611 && read_one_ptr (lm_addr
+ lmo
->l_next_offset
,
5612 &l_next
, ptr_size
) == 0)
5614 unsigned char libname
[PATH_MAX
];
5616 if (lm_prev
!= l_prev
)
5618 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5619 (long) lm_prev
, (long) l_prev
);
5623 /* Ignore the first entry even if it has valid name as the first entry
5624 corresponds to the main executable. The first entry should not be
5625 skipped if the dynamic loader was loaded late by a static executable
5626 (see solib-svr4.c parameter ignore_first). But in such case the main
5627 executable does not have PT_DYNAMIC present and this function already
5628 exited above due to failed get_r_debug. */
5631 sprintf (p
, " main-lm=\"0x%lx\"", (unsigned long) lm_addr
);
5636 /* Not checking for error because reading may stop before
5637 we've got PATH_MAX worth of characters. */
5639 linux_read_memory (l_name
, libname
, sizeof (libname
) - 1);
5640 libname
[sizeof (libname
) - 1] = '\0';
5641 if (libname
[0] != '\0')
5643 /* 6x the size for xml_escape_text below. */
5644 size_t len
= 6 * strlen ((char *) libname
);
5649 /* Terminate `<library-list-svr4'. */
5654 while (allocated
< p
- document
+ len
+ 200)
5656 /* Expand to guarantee sufficient storage. */
5657 uintptr_t document_len
= p
- document
;
5659 document
= xrealloc (document
, 2 * allocated
);
5661 p
= document
+ document_len
;
5664 name
= xml_escape_text ((char *) libname
);
5665 p
+= sprintf (p
, "<library name=\"%s\" lm=\"0x%lx\" "
5666 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5667 name
, (unsigned long) lm_addr
,
5668 (unsigned long) l_addr
, (unsigned long) l_ld
);
5679 /* Empty list; terminate `<library-list-svr4'. */
5683 strcpy (p
, "</library-list-svr4>");
5685 document_len
= strlen (document
);
5686 if (offset
< document_len
)
5687 document_len
-= offset
;
5690 if (len
> document_len
)
5693 memcpy (readbuf
, document
+ offset
, len
);
5699 #ifdef HAVE_LINUX_BTRACE
5701 /* Enable branch tracing. */
5703 static struct btrace_target_info
*
5704 linux_low_enable_btrace (ptid_t ptid
)
5706 struct btrace_target_info
*tinfo
;
5708 tinfo
= linux_enable_btrace (ptid
);
5712 struct thread_info
*thread
= find_thread_ptid (ptid
);
5713 struct regcache
*regcache
= get_thread_regcache (thread
, 0);
5715 tinfo
->ptr_bits
= register_size (regcache
->tdesc
, 0) * 8;
5721 /* Read branch trace data as btrace xml document. */
5724 linux_low_read_btrace (struct btrace_target_info
*tinfo
, struct buffer
*buffer
,
5727 VEC (btrace_block_s
) *btrace
;
5728 struct btrace_block
*block
;
5731 btrace
= linux_read_btrace (tinfo
, type
);
5733 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
5734 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
5736 for (i
= 0; VEC_iterate (btrace_block_s
, btrace
, i
, block
); i
++)
5737 buffer_xml_printf (buffer
, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
5738 paddress (block
->begin
), paddress (block
->end
));
5740 buffer_grow_str (buffer
, "</btrace>\n");
5742 VEC_free (btrace_block_s
, btrace
);
5744 #endif /* HAVE_LINUX_BTRACE */
/* The Linux target vector: maps the generic gdbserver target
   operations onto this file's implementations.  NOTE(review): this
   positional initializer is heavily truncated in this view — the
   jumps in the embedded original line numbers show that many slots
   (attach/kill/detach/join/wait/resume, breakpoint ops, memory
   read/write, and others) are elided here.  The slot order must match
   the member order of `struct target_ops' in target.h exactly; do not
   reorder or fill gaps without consulting that declaration.  */
5746 static struct target_ops linux_target_ops
= {
5747 linux_create_inferior
,
5756 linux_fetch_registers
,
5757 linux_store_registers
,
5758 linux_prepare_to_access_memory
,
5759 linux_done_accessing_memory
,
5762 linux_look_up_symbols
,
5763 linux_request_interrupt
,
5767 linux_stopped_by_watchpoint
,
5768 linux_stopped_data_address
,
/* uClinux no-MMU targets expose text/data segment addresses.  */
5769 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5770 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5771 && defined(PT_TEXT_END_ADDR)
5776 #ifdef USE_THREAD_DB
5777 thread_db_get_tls_address
,
5782 hostio_last_error_from_errno
,
5785 linux_supports_non_stop
,
5787 linux_start_non_stop
,
5788 linux_supports_multi_process
,
5789 #ifdef USE_THREAD_DB
5790 thread_db_handle_monitor_command
,
5794 linux_common_core_of_thread
,
5796 linux_process_qsupported
,
5797 linux_supports_tracepoints
,
5800 linux_thread_stopped
,
5804 linux_cancel_breakpoints
,
5805 linux_stabilize_threads
,
5806 linux_install_fast_tracepoint_jump_pad
,
5808 linux_supports_disable_randomization
,
5809 linux_get_min_fast_tracepoint_insn_len
,
5810 linux_qxfer_libraries_svr4
,
5811 linux_supports_agent
,
/* Branch-trace hooks are only present when built with btrace.  */
5812 #ifdef HAVE_LINUX_BTRACE
5813 linux_supports_btrace
,
5814 linux_low_enable_btrace
,
5815 linux_disable_btrace
,
5816 linux_low_read_btrace
,
5823 linux_supports_range_stepping
,
/* Ignore the thread-cancellation signal used by the inferior's thread
   library so gdbserver itself is not killed by it.  */

static void
linux_init_signals ()
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}
5836 #ifdef HAVE_LINUX_REGSETS
5838 initialize_regsets_info (struct regsets_info
*info
)
5840 for (info
->num_regsets
= 0;
5841 info
->regsets
[info
->num_regsets
].size
>= 0;
5842 info
->num_regsets
++)
5848 initialize_low (void)
5850 struct sigaction sigchld_action
;
5851 memset (&sigchld_action
, 0, sizeof (sigchld_action
));
5852 set_target_ops (&linux_target_ops
);
5853 set_breakpoint_data (the_low_target
.breakpoint
,
5854 the_low_target
.breakpoint_len
);
5855 linux_init_signals ();
5856 linux_ptrace_init_warnings ();
5858 sigchld_action
.sa_handler
= sigchld_handler
;
5859 sigemptyset (&sigchld_action
.sa_mask
);
5860 sigchld_action
.sa_flags
= SA_RESTART
;
5861 sigaction (SIGCHLD
, &sigchld_action
, NULL
);
5863 initialize_low_arch ();