1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
27 #include <sys/ptrace.h>
28 #include "nat/linux-ptrace.h"
29 #include "nat/linux-procfs.h"
30 #include "nat/linux-personality.h"
32 #include <sys/ioctl.h>
35 #include <sys/syscall.h>
39 #include <sys/types.h>
44 #include "filestuff.h"
45 #include "tracepoint.h"
48 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
49 then ELFMAG0 will have been defined. If it didn't get included by
50 gdb_proc_service.h then including it will likely introduce a duplicate
51 definition of elf_fpregset_t. */
56 #define SPUFS_MAGIC 0x23c9b64e
59 #ifdef HAVE_PERSONALITY
60 # include <sys/personality.h>
61 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
62 # define ADDR_NO_RANDOMIZE 0x0040000
71 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
74 /* This is the kernel's hard limit. Not to be confused with
80 /* Some targets did not define these ptrace constants from the start,
81 so gdbserver defines them locally here. In the future, these may
82 be removed after they are added to asm/ptrace.h. */
83 #if !(defined(PT_TEXT_ADDR) \
84 || defined(PT_DATA_ADDR) \
85 || defined(PT_TEXT_END_ADDR))
86 #if defined(__mcoldfire__)
87 /* These are still undefined in 3.10 kernels. */
88 #define PT_TEXT_ADDR 49*4
89 #define PT_DATA_ADDR 50*4
90 #define PT_TEXT_END_ADDR 51*4
91 /* BFIN already defines these since at least 2.6.32 kernels. */
93 #define PT_TEXT_ADDR 220
94 #define PT_TEXT_END_ADDR 224
95 #define PT_DATA_ADDR 228
96 /* These are still undefined in 3.10 kernels. */
97 #elif defined(__TMS320C6X__)
98 #define PT_TEXT_ADDR (0x10000*4)
99 #define PT_DATA_ADDR (0x10004*4)
100 #define PT_TEXT_END_ADDR (0x10008*4)
104 #ifdef HAVE_LINUX_BTRACE
105 # include "nat/linux-btrace.h"
106 # include "btrace-common.h"
109 #ifndef HAVE_ELF32_AUXV_T
110 /* Copied from glibc's elf.h. */
113 uint32_t a_type
; /* Entry type */
116 uint32_t a_val
; /* Integer value */
117 /* We use to have pointer elements added here. We cannot do that,
118 though, since it does not work when using 32-bit definitions
119 on 64-bit platforms and vice versa. */
124 #ifndef HAVE_ELF64_AUXV_T
125 /* Copied from glibc's elf.h. */
128 uint64_t a_type
; /* Entry type */
131 uint64_t a_val
; /* Integer value */
132 /* We use to have pointer elements added here. We cannot do that,
133 though, since it does not work when using 32-bit definitions
134 on 64-bit platforms and vice versa. */
141 /* See nat/linux-nat.h. */
144 ptid_of_lwp (struct lwp_info
*lwp
)
146 return ptid_of (get_lwp_thread (lwp
));
149 /* See nat/linux-nat.h. */
152 lwp_is_stopped (struct lwp_info
*lwp
)
157 /* See nat/linux-nat.h. */
159 enum target_stop_reason
160 lwp_stop_reason (struct lwp_info
*lwp
)
162 return lwp
->stop_reason
;
165 /* A list of all unknown processes which receive stop signals. Some
166 other process will presumably claim each of these as forked
167 children momentarily. */
169 struct simple_pid_list
171 /* The process ID. */
174 /* The status as reported by waitpid. */
178 struct simple_pid_list
*next
;
180 struct simple_pid_list
*stopped_pids
;
182 /* Trivial list manipulation functions to keep track of a list of new
183 stopped processes. */
186 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
188 struct simple_pid_list
*new_pid
= xmalloc (sizeof (struct simple_pid_list
));
191 new_pid
->status
= status
;
192 new_pid
->next
= *listp
;
197 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *statusp
)
199 struct simple_pid_list
**p
;
201 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
202 if ((*p
)->pid
== pid
)
204 struct simple_pid_list
*next
= (*p
)->next
;
206 *statusp
= (*p
)->status
;
214 enum stopping_threads_kind
216 /* Not stopping threads presently. */
217 NOT_STOPPING_THREADS
,
219 /* Stopping threads. */
222 /* Stopping and suspending threads. */
223 STOPPING_AND_SUSPENDING_THREADS
226 /* This is set while stop_all_lwps is in effect. */
227 enum stopping_threads_kind stopping_threads
= NOT_STOPPING_THREADS
;
229 /* FIXME make into a target method? */
230 int using_threads
= 1;
232 /* True if we're presently stabilizing threads (moving them out of
234 static int stabilizing_threads
;
236 static void linux_resume_one_lwp (struct lwp_info
*lwp
,
237 int step
, int signal
, siginfo_t
*info
);
238 static void linux_resume (struct thread_resume
*resume_info
, size_t n
);
239 static void stop_all_lwps (int suspend
, struct lwp_info
*except
);
240 static void unstop_all_lwps (int unsuspend
, struct lwp_info
*except
);
241 static int linux_wait_for_event_filtered (ptid_t wait_ptid
, ptid_t filter_ptid
,
242 int *wstat
, int options
);
243 static int linux_wait_for_event (ptid_t ptid
, int *wstat
, int options
);
244 static struct lwp_info
*add_lwp (ptid_t ptid
);
245 static int linux_stopped_by_watchpoint (void);
246 static void mark_lwp_dead (struct lwp_info
*lwp
, int wstat
);
247 static void proceed_all_lwps (void);
248 static int finish_step_over (struct lwp_info
*lwp
);
249 static int kill_lwp (unsigned long lwpid
, int signo
);
251 /* When the event-loop is doing a step-over, this points at the thread
253 ptid_t step_over_bkpt
;
255 /* True if the low target can hardware single-step. Such targets
256 don't need a BREAKPOINT_REINSERT_ADDR callback. */
259 can_hardware_single_step (void)
261 return (the_low_target
.breakpoint_reinsert_addr
== NULL
);
264 /* True if the low target supports memory breakpoints. If so, we'll
265 have a GET_PC implementation. */
268 supports_breakpoints (void)
270 return (the_low_target
.get_pc
!= NULL
);
273 /* Returns true if this target can support fast tracepoints. This
274 does not mean that the in-process agent has been loaded in the
278 supports_fast_tracepoints (void)
280 return the_low_target
.install_fast_tracepoint_jump_pad
!= NULL
;
283 /* True if LWP is stopped in its stepping range. */
286 lwp_in_step_range (struct lwp_info
*lwp
)
288 CORE_ADDR pc
= lwp
->stop_pc
;
290 return (pc
>= lwp
->step_range_start
&& pc
< lwp
->step_range_end
);
293 struct pending_signals
297 struct pending_signals
*prev
;
300 /* The read/write ends of the pipe registered as waitable file in the
302 static int linux_event_pipe
[2] = { -1, -1 };
304 /* True if we're currently in async mode. */
305 #define target_is_async_p() (linux_event_pipe[0] != -1)
307 static void send_sigstop (struct lwp_info
*lwp
);
308 static void wait_for_sigstop (void);
310 /* Return non-zero if HEADER is a 64-bit ELF file. */
/* Return non-zero if HEADER is a 64-bit ELF file.  On any valid ELF
   header (64-bit or not), also store the machine type in *MACHINE;
   otherwise set *MACHINE to EM_NONE and return -1.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}
328 /* Return non-zero if FILE is a 64-bit ELF file,
329 zero if the file is not a 64-bit ELF file,
330 and -1 if the file is not accessible or doesn't exist. */
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      /* Too short to hold an ELF header: not a 64-bit ELF file.  */
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}
352 /* Accepts an integer PID; Returns true if the executable PID is
353 running is a 64-bit ELF file.. */
356 linux_pid_exe_is_elf_64_file (int pid
, unsigned int *machine
)
360 sprintf (file
, "/proc/%d/exe", pid
);
361 return elf_64_file_p (file
, machine
);
365 delete_lwp (struct lwp_info
*lwp
)
367 struct thread_info
*thr
= get_lwp_thread (lwp
);
370 debug_printf ("deleting %ld\n", lwpid_of (thr
));
373 free (lwp
->arch_private
);
377 /* Add a process to the common process list, and set its private
380 static struct process_info
*
381 linux_add_process (int pid
, int attached
)
383 struct process_info
*proc
;
385 proc
= add_process (pid
, attached
);
386 proc
->priv
= xcalloc (1, sizeof (*proc
->priv
));
388 /* Set the arch when the first LWP stops. */
389 proc
->priv
->new_inferior
= 1;
391 if (the_low_target
.new_process
!= NULL
)
392 proc
->priv
->arch_private
= the_low_target
.new_process ();
397 static CORE_ADDR
get_pc (struct lwp_info
*lwp
);
399 /* Handle a GNU/Linux extended wait response. If we see a clone
400 event, we need to add the new LWP to our list (and not report the
401 trap to higher layers). */
404 handle_extended_wait (struct lwp_info
*event_child
, int wstat
)
406 int event
= linux_ptrace_get_extended_event (wstat
);
407 struct thread_info
*event_thr
= get_lwp_thread (event_child
);
408 struct lwp_info
*new_lwp
;
410 if (event
== PTRACE_EVENT_CLONE
)
413 unsigned long new_pid
;
416 ptrace (PTRACE_GETEVENTMSG
, lwpid_of (event_thr
), (PTRACE_TYPE_ARG3
) 0,
419 /* If we haven't already seen the new PID stop, wait for it now. */
420 if (!pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
422 /* The new child has a pending SIGSTOP. We can't affect it until it
423 hits the SIGSTOP, but we're already attached. */
425 ret
= my_waitpid (new_pid
, &status
, __WALL
);
428 perror_with_name ("waiting for new child");
429 else if (ret
!= new_pid
)
430 warning ("wait returned unexpected PID %d", ret
);
431 else if (!WIFSTOPPED (status
))
432 warning ("wait returned unexpected status 0x%x", status
);
436 debug_printf ("HEW: Got clone event "
437 "from LWP %ld, new child is LWP %ld\n",
438 lwpid_of (event_thr
), new_pid
);
440 ptid
= ptid_build (pid_of (event_thr
), new_pid
, 0);
441 new_lwp
= add_lwp (ptid
);
443 /* Either we're going to immediately resume the new thread
444 or leave it stopped. linux_resume_one_lwp is a nop if it
445 thinks the thread is currently running, so set this first
446 before calling linux_resume_one_lwp. */
447 new_lwp
->stopped
= 1;
449 /* If we're suspending all threads, leave this one suspended
451 if (stopping_threads
== STOPPING_AND_SUSPENDING_THREADS
)
452 new_lwp
->suspended
= 1;
454 /* Normally we will get the pending SIGSTOP. But in some cases
455 we might get another signal delivered to the group first.
456 If we do get another signal, be sure not to lose it. */
457 if (WSTOPSIG (status
) != SIGSTOP
)
459 new_lwp
->stop_expected
= 1;
460 new_lwp
->status_pending_p
= 1;
461 new_lwp
->status_pending
= status
;
466 /* Return the PC as read from the regcache of LWP, without any
470 get_pc (struct lwp_info
*lwp
)
472 struct thread_info
*saved_thread
;
473 struct regcache
*regcache
;
476 if (the_low_target
.get_pc
== NULL
)
479 saved_thread
= current_thread
;
480 current_thread
= get_lwp_thread (lwp
);
482 regcache
= get_thread_regcache (current_thread
, 1);
483 pc
= (*the_low_target
.get_pc
) (regcache
);
486 debug_printf ("pc is 0x%lx\n", (long) pc
);
488 current_thread
= saved_thread
;
492 /* This function should only be called if LWP got a SIGTRAP.
493 The SIGTRAP could mean several things.
495 On i386, where decr_pc_after_break is non-zero:
497 If we were single-stepping this process using PTRACE_SINGLESTEP, we
498 will get only the one SIGTRAP. The value of $eip will be the next
499 instruction. If the instruction we stepped over was a breakpoint,
500 we need to decrement the PC.
502 If we continue the process using PTRACE_CONT, we will get a
503 SIGTRAP when we hit a breakpoint. The value of $eip will be
504 the instruction after the breakpoint (i.e. needs to be
505 decremented). If we report the SIGTRAP to GDB, we must also
506 report the undecremented PC. If the breakpoint is removed, we
507 must resume at the decremented PC.
509 On a non-decr_pc_after_break machine with hardware or kernel
512 If we either single-step a breakpoint instruction, or continue and
513 hit a breakpoint instruction, our PC will point at the breakpoint
517 check_stopped_by_breakpoint (struct lwp_info
*lwp
)
520 CORE_ADDR sw_breakpoint_pc
;
521 struct thread_info
*saved_thread
;
522 #if USE_SIGTRAP_SIGINFO
526 if (the_low_target
.get_pc
== NULL
)
530 sw_breakpoint_pc
= pc
- the_low_target
.decr_pc_after_break
;
532 /* breakpoint_at reads from the current thread. */
533 saved_thread
= current_thread
;
534 current_thread
= get_lwp_thread (lwp
);
536 #if USE_SIGTRAP_SIGINFO
537 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
538 (PTRACE_TYPE_ARG3
) 0, &siginfo
) == 0)
540 if (siginfo
.si_signo
== SIGTRAP
)
542 if (siginfo
.si_code
== GDB_ARCH_TRAP_BRKPT
)
546 struct thread_info
*thr
= get_lwp_thread (lwp
);
548 debug_printf ("CSBB: Push back software breakpoint for %s\n",
549 target_pid_to_str (ptid_of (thr
)));
552 /* Back up the PC if necessary. */
553 if (pc
!= sw_breakpoint_pc
)
555 struct regcache
*regcache
556 = get_thread_regcache (current_thread
, 1);
557 (*the_low_target
.set_pc
) (regcache
, sw_breakpoint_pc
);
560 lwp
->stop_pc
= sw_breakpoint_pc
;
561 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
562 current_thread
= saved_thread
;
565 else if (siginfo
.si_code
== TRAP_HWBKPT
)
569 struct thread_info
*thr
= get_lwp_thread (lwp
);
571 debug_printf ("CSBB: Push back hardware "
572 "breakpoint/watchpoint for %s\n",
573 target_pid_to_str (ptid_of (thr
)));
577 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
578 current_thread
= saved_thread
;
584 /* We may have just stepped a breakpoint instruction. E.g., in
585 non-stop mode, GDB first tells the thread A to step a range, and
586 then the user inserts a breakpoint inside the range. In that
587 case we need to report the breakpoint PC. */
588 if ((!lwp
->stepping
|| lwp
->stop_pc
== sw_breakpoint_pc
)
589 && (*the_low_target
.breakpoint_at
) (sw_breakpoint_pc
))
593 struct thread_info
*thr
= get_lwp_thread (lwp
);
595 debug_printf ("CSBB: %s stopped by software breakpoint\n",
596 target_pid_to_str (ptid_of (thr
)));
599 /* Back up the PC if necessary. */
600 if (pc
!= sw_breakpoint_pc
)
602 struct regcache
*regcache
603 = get_thread_regcache (current_thread
, 1);
604 (*the_low_target
.set_pc
) (regcache
, sw_breakpoint_pc
);
607 lwp
->stop_pc
= sw_breakpoint_pc
;
608 lwp
->stop_reason
= TARGET_STOPPED_BY_SW_BREAKPOINT
;
609 current_thread
= saved_thread
;
613 if (hardware_breakpoint_inserted_here (pc
))
617 struct thread_info
*thr
= get_lwp_thread (lwp
);
619 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
620 target_pid_to_str (ptid_of (thr
)));
624 lwp
->stop_reason
= TARGET_STOPPED_BY_HW_BREAKPOINT
;
625 current_thread
= saved_thread
;
630 current_thread
= saved_thread
;
634 static struct lwp_info
*
635 add_lwp (ptid_t ptid
)
637 struct lwp_info
*lwp
;
639 lwp
= (struct lwp_info
*) xmalloc (sizeof (*lwp
));
640 memset (lwp
, 0, sizeof (*lwp
));
642 if (the_low_target
.new_thread
!= NULL
)
643 lwp
->arch_private
= the_low_target
.new_thread ();
645 lwp
->thread
= add_thread (ptid
, lwp
);
650 /* Start an inferior process and returns its pid.
651 ALLARGS is a vector of program-name and args. */
654 linux_create_inferior (char *program
, char **allargs
)
656 struct lwp_info
*new_lwp
;
659 struct cleanup
*restore_personality
660 = maybe_disable_address_space_randomization (disable_randomization
);
662 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
668 perror_with_name ("fork");
673 ptrace (PTRACE_TRACEME
, 0, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
675 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
676 signal (__SIGRTMIN
+ 1, SIG_DFL
);
681 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
682 stdout to stderr so that inferior i/o doesn't corrupt the connection.
683 Also, redirect stdin to /dev/null. */
684 if (remote_connection_is_stdio ())
687 open ("/dev/null", O_RDONLY
);
689 if (write (2, "stdin/stdout redirected\n",
690 sizeof ("stdin/stdout redirected\n") - 1) < 0)
692 /* Errors ignored. */;
696 execv (program
, allargs
);
698 execvp (program
, allargs
);
700 fprintf (stderr
, "Cannot exec %s: %s.\n", program
,
706 do_cleanups (restore_personality
);
708 linux_add_process (pid
, 0);
710 ptid
= ptid_build (pid
, pid
, 0);
711 new_lwp
= add_lwp (ptid
);
712 new_lwp
->must_set_ptrace_flags
= 1;
717 /* Attach to an inferior process. Returns 0 on success, ERRNO on
721 linux_attach_lwp (ptid_t ptid
)
723 struct lwp_info
*new_lwp
;
724 int lwpid
= ptid_get_lwp (ptid
);
726 if (ptrace (PTRACE_ATTACH
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0)
730 new_lwp
= add_lwp (ptid
);
732 /* We need to wait for SIGSTOP before being able to make the next
733 ptrace call on this LWP. */
734 new_lwp
->must_set_ptrace_flags
= 1;
736 if (linux_proc_pid_is_stopped (lwpid
))
739 debug_printf ("Attached to a stopped process\n");
741 /* The process is definitely stopped. It is in a job control
742 stop, unless the kernel predates the TASK_STOPPED /
743 TASK_TRACED distinction, in which case it might be in a
744 ptrace stop. Make sure it is in a ptrace stop; from there we
745 can kill it, signal it, et cetera.
747 First make sure there is a pending SIGSTOP. Since we are
748 already attached, the process can not transition from stopped
749 to running without a PTRACE_CONT; so we know this signal will
750 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
751 probably already in the queue (unless this kernel is old
752 enough to use TASK_STOPPED for ptrace stops); but since
753 SIGSTOP is not an RT signal, it can only be queued once. */
754 kill_lwp (lwpid
, SIGSTOP
);
756 /* Finally, resume the stopped process. This will deliver the
757 SIGSTOP (or a higher priority signal, just like normal
758 PTRACE_ATTACH), which we'll catch later on. */
759 ptrace (PTRACE_CONT
, lwpid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
762 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
765 There are several cases to consider here:
767 1) gdbserver has already attached to the process and is being notified
768 of a new thread that is being created.
769 In this case we should ignore that SIGSTOP and resume the
770 process. This is handled below by setting stop_expected = 1,
771 and the fact that add_thread sets last_resume_kind ==
774 2) This is the first thread (the process thread), and we're attaching
775 to it via attach_inferior.
776 In this case we want the process thread to stop.
777 This is handled by having linux_attach set last_resume_kind ==
778 resume_stop after we return.
780 If the pid we are attaching to is also the tgid, we attach to and
781 stop all the existing threads. Otherwise, we attach to pid and
782 ignore any other threads in the same group as this pid.
784 3) GDB is connecting to gdbserver and is requesting an enumeration of all
786 In this case we want the thread to stop.
787 FIXME: This case is currently not properly handled.
788 We should wait for the SIGSTOP but don't. Things work apparently
789 because enough time passes between when we ptrace (ATTACH) and when
790 gdb makes the next ptrace call on the thread.
792 On the other hand, if we are currently trying to stop all threads, we
793 should treat the new thread as if we had sent it a SIGSTOP. This works
794 because we are guaranteed that the add_lwp call above added us to the
795 end of the list, and so the new thread has not yet reached
796 wait_for_sigstop (but will). */
797 new_lwp
->stop_expected
= 1;
802 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
803 already attached. Returns true if a new LWP is found, false
807 attach_proc_task_lwp_callback (ptid_t ptid
)
809 /* Is this a new thread? */
810 if (find_thread_ptid (ptid
) == NULL
)
812 int lwpid
= ptid_get_lwp (ptid
);
816 debug_printf ("Found new lwp %d\n", lwpid
);
818 err
= linux_attach_lwp (ptid
);
820 /* Be quiet if we simply raced with the thread exiting. EPERM
821 is returned if the thread's task still exists, and is marked
822 as exited or zombie, as well as other conditions, so in that
823 case, confirm the status in /proc/PID/status. */
825 || (err
== EPERM
&& linux_proc_pid_is_gone (lwpid
)))
829 debug_printf ("Cannot attach to lwp %d: "
830 "thread is gone (%d: %s)\n",
831 lwpid
, err
, strerror (err
));
836 warning (_("Cannot attach to lwp %d: %s"),
838 linux_ptrace_attach_fail_reason_string (ptid
, err
));
846 /* Attach to PID. If PID is the tgid, attach to it and all
850 linux_attach (unsigned long pid
)
852 ptid_t ptid
= ptid_build (pid
, pid
, 0);
855 /* Attach to PID. We will check for other threads
857 err
= linux_attach_lwp (ptid
);
859 error ("Cannot attach to process %ld: %s",
860 pid
, linux_ptrace_attach_fail_reason_string (ptid
, err
));
862 linux_add_process (pid
, 1);
866 struct thread_info
*thread
;
868 /* Don't ignore the initial SIGSTOP if we just attached to this
869 process. It will be collected by wait shortly. */
870 thread
= find_thread_ptid (ptid_build (pid
, pid
, 0));
871 thread
->last_resume_kind
= resume_stop
;
874 /* We must attach to every LWP. If /proc is mounted, use that to
875 find them now. On the one hand, the inferior may be using raw
876 clone instead of using pthreads. On the other hand, even if it
877 is using pthreads, GDB may not be connected yet (thread_db needs
878 to do symbol lookups, through qSymbol). Also, thread_db walks
879 structures in the inferior's address space to find the list of
880 threads/LWPs, and those structures may well be corrupted. Note
881 that once thread_db is loaded, we'll still use it to list threads
882 and associate pthread info with each LWP. */
883 linux_proc_attach_tgid_threads (pid
, attach_proc_task_lwp_callback
);
894 second_thread_of_pid_p (struct inferior_list_entry
*entry
, void *args
)
896 struct counter
*counter
= args
;
898 if (ptid_get_pid (entry
->id
) == counter
->pid
)
900 if (++counter
->count
> 1)
908 last_thread_of_process_p (int pid
)
910 struct counter counter
= { pid
, 0 };
912 return (find_inferior (&all_threads
,
913 second_thread_of_pid_p
, &counter
) == NULL
);
919 linux_kill_one_lwp (struct lwp_info
*lwp
)
921 struct thread_info
*thr
= get_lwp_thread (lwp
);
922 int pid
= lwpid_of (thr
);
924 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
925 there is no signal context, and ptrace(PTRACE_KILL) (or
926 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
927 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
928 alternative is to kill with SIGKILL. We only need one SIGKILL
929 per process, not one for each thread. But since we still support
930 linuxthreads, and we also support debugging programs using raw
931 clone without CLONE_THREAD, we send one for each thread. For
932 years, we used PTRACE_KILL only, so we're being a bit paranoid
933 about some old kernels where PTRACE_KILL might work better
934 (dubious if there are any such, but that's why it's paranoia), so
935 we try SIGKILL first, PTRACE_KILL second, and so we're fine
939 kill_lwp (pid
, SIGKILL
);
942 int save_errno
= errno
;
944 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
945 target_pid_to_str (ptid_of (thr
)),
946 save_errno
? strerror (save_errno
) : "OK");
950 ptrace (PTRACE_KILL
, pid
, (PTRACE_TYPE_ARG3
) 0, (PTRACE_TYPE_ARG4
) 0);
953 int save_errno
= errno
;
955 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
956 target_pid_to_str (ptid_of (thr
)),
957 save_errno
? strerror (save_errno
) : "OK");
961 /* Kill LWP and wait for it to die. */
964 kill_wait_lwp (struct lwp_info
*lwp
)
966 struct thread_info
*thr
= get_lwp_thread (lwp
);
967 int pid
= ptid_get_pid (ptid_of (thr
));
968 int lwpid
= ptid_get_lwp (ptid_of (thr
));
973 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid
, pid
);
977 linux_kill_one_lwp (lwp
);
979 /* Make sure it died. Notes:
981 - The loop is most likely unnecessary.
983 - We don't use linux_wait_for_event as that could delete lwps
984 while we're iterating over them. We're not interested in
985 any pending status at this point, only in making sure all
986 wait status on the kernel side are collected until the
989 - We don't use __WALL here as the __WALL emulation relies on
990 SIGCHLD, and killing a stopped process doesn't generate
991 one, nor an exit status.
993 res
= my_waitpid (lwpid
, &wstat
, 0);
994 if (res
== -1 && errno
== ECHILD
)
995 res
= my_waitpid (lwpid
, &wstat
, __WCLONE
);
996 } while (res
> 0 && WIFSTOPPED (wstat
));
998 gdb_assert (res
> 0);
1001 /* Callback for `find_inferior'. Kills an lwp of a given process,
1002 except the leader. */
1005 kill_one_lwp_callback (struct inferior_list_entry
*entry
, void *args
)
1007 struct thread_info
*thread
= (struct thread_info
*) entry
;
1008 struct lwp_info
*lwp
= get_thread_lwp (thread
);
1009 int pid
= * (int *) args
;
1011 if (ptid_get_pid (entry
->id
) != pid
)
1014 /* We avoid killing the first thread here, because of a Linux kernel (at
1015 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1016 the children get a chance to be reaped, it will remain a zombie
1019 if (lwpid_of (thread
) == pid
)
1022 debug_printf ("lkop: is last of process %s\n",
1023 target_pid_to_str (entry
->id
));
1027 kill_wait_lwp (lwp
);
1032 linux_kill (int pid
)
1034 struct process_info
*process
;
1035 struct lwp_info
*lwp
;
1037 process
= find_process_pid (pid
);
1038 if (process
== NULL
)
1041 /* If we're killing a running inferior, make sure it is stopped
1042 first, as PTRACE_KILL will not work otherwise. */
1043 stop_all_lwps (0, NULL
);
1045 find_inferior (&all_threads
, kill_one_lwp_callback
, &pid
);
1047 /* See the comment in linux_kill_one_lwp. We did not kill the first
1048 thread in the list, so do so now. */
1049 lwp
= find_lwp_pid (pid_to_ptid (pid
));
1054 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1058 kill_wait_lwp (lwp
);
1060 the_target
->mourn (process
);
1062 /* Since we presently can only stop all lwps of all processes, we
1063 need to unstop lwps of other processes. */
1064 unstop_all_lwps (0, NULL
);
1068 /* Get pending signal of THREAD, for detaching purposes. This is the
1069 signal the thread last stopped for, which we need to deliver to the
1070 thread when detaching, otherwise, it'd be suppressed/lost. */
1073 get_detach_signal (struct thread_info
*thread
)
1075 enum gdb_signal signo
= GDB_SIGNAL_0
;
1077 struct lwp_info
*lp
= get_thread_lwp (thread
);
1079 if (lp
->status_pending_p
)
1080 status
= lp
->status_pending
;
1083 /* If the thread had been suspended by gdbserver, and it stopped
1084 cleanly, then it'll have stopped with SIGSTOP. But we don't
1085 want to deliver that SIGSTOP. */
1086 if (thread
->last_status
.kind
!= TARGET_WAITKIND_STOPPED
1087 || thread
->last_status
.value
.sig
== GDB_SIGNAL_0
)
1090 /* Otherwise, we may need to deliver the signal we
1092 status
= lp
->last_status
;
1095 if (!WIFSTOPPED (status
))
1098 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1099 target_pid_to_str (ptid_of (thread
)));
1103 /* Extended wait statuses aren't real SIGTRAPs. */
1104 if (WSTOPSIG (status
) == SIGTRAP
&& linux_is_extended_waitstatus (status
))
1107 debug_printf ("GPS: lwp %s had stopped with extended "
1108 "status: no pending signal\n",
1109 target_pid_to_str (ptid_of (thread
)));
1113 signo
= gdb_signal_from_host (WSTOPSIG (status
));
1115 if (program_signals_p
&& !program_signals
[signo
])
1118 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1119 target_pid_to_str (ptid_of (thread
)),
1120 gdb_signal_to_string (signo
));
1123 else if (!program_signals_p
1124 /* If we have no way to know which signals GDB does not
1125 want to have passed to the program, assume
1126 SIGTRAP/SIGINT, which is GDB's default. */
1127 && (signo
== GDB_SIGNAL_TRAP
|| signo
== GDB_SIGNAL_INT
))
1130 debug_printf ("GPS: lwp %s had signal %s, "
1131 "but we don't know if we should pass it. "
1132 "Default to not.\n",
1133 target_pid_to_str (ptid_of (thread
)),
1134 gdb_signal_to_string (signo
));
1140 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1141 target_pid_to_str (ptid_of (thread
)),
1142 gdb_signal_to_string (signo
));
1144 return WSTOPSIG (status
);
1149 linux_detach_one_lwp (struct inferior_list_entry
*entry
, void *args
)
1151 struct thread_info
*thread
= (struct thread_info
*) entry
;
1152 struct lwp_info
*lwp
= get_thread_lwp (thread
);
1153 int pid
= * (int *) args
;
1156 if (ptid_get_pid (entry
->id
) != pid
)
1159 /* If there is a pending SIGSTOP, get rid of it. */
1160 if (lwp
->stop_expected
)
1163 debug_printf ("Sending SIGCONT to %s\n",
1164 target_pid_to_str (ptid_of (thread
)));
1166 kill_lwp (lwpid_of (thread
), SIGCONT
);
1167 lwp
->stop_expected
= 0;
1170 /* Flush any pending changes to the process's registers. */
1171 regcache_invalidate_thread (thread
);
1173 /* Pass on any pending signal for this thread. */
1174 sig
= get_detach_signal (thread
);
1176 /* Finally, let it resume. */
1177 if (the_low_target
.prepare_to_resume
!= NULL
)
1178 the_low_target
.prepare_to_resume (lwp
);
1179 if (ptrace (PTRACE_DETACH
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
1180 (PTRACE_TYPE_ARG4
) (long) sig
) < 0)
1181 error (_("Can't detach %s: %s"),
1182 target_pid_to_str (ptid_of (thread
)),
1190 linux_detach (int pid
)
1192 struct process_info
*process
;
1194 process
= find_process_pid (pid
);
1195 if (process
== NULL
)
1198 /* Stop all threads before detaching. First, ptrace requires that
1199 the thread is stopped to sucessfully detach. Second, thread_db
1200 may need to uninstall thread event breakpoints from memory, which
1201 only works with a stopped process anyway. */
1202 stop_all_lwps (0, NULL
);
1204 #ifdef USE_THREAD_DB
1205 thread_db_detach (process
);
1208 /* Stabilize threads (move out of jump pads). */
1209 stabilize_threads ();
1211 find_inferior (&all_threads
, linux_detach_one_lwp
, &pid
);
1213 the_target
->mourn (process
);
1215 /* Since we presently can only stop all lwps of all processes, we
1216 need to unstop lwps of other processes. */
1217 unstop_all_lwps (0, NULL
);
1221 /* Remove all LWPs that belong to process PROC from the lwp list. */
/* Remove all LWPs that belong to process PROC from the lwp list.
   Callback for find_inferior.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}
1237 linux_mourn (struct process_info
*process
)
1239 struct process_info_private
*priv
;
1241 #ifdef USE_THREAD_DB
1242 thread_db_mourn (process
);
1245 find_inferior (&all_threads
, delete_lwp_callback
, process
);
1247 /* Freeing all private data. */
1248 priv
= process
->priv
;
1249 free (priv
->arch_private
);
1251 process
->priv
= NULL
;
1253 remove_process (process
);
/* Wait until process PID exits or is killed, collecting its final
   wait status.  Stops waiting once the child is gone (ECHILD).  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}
1268 /* Return nonzero if the given thread is still alive. */
1270 linux_thread_alive (ptid_t ptid
)
1272 struct lwp_info
*lwp
= find_lwp_pid (ptid
);
1274 /* We assume we always know if a thread exits. If a whole process
1275 exited but we still haven't been able to report it to GDB, we'll
1276 hold on to the last lwp of the dead process. */
1283 /* Return 1 if this lwp still has an interesting status pending. If
1284 not (e.g., it had stopped for a breakpoint that is gone), return
1288 thread_still_has_status_pending_p (struct thread_info
*thread
)
1290 struct lwp_info
*lp
= get_thread_lwp (thread
);
1292 if (!lp
->status_pending_p
)
1295 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1296 report any status pending the LWP may have. */
1297 if (thread
->last_resume_kind
== resume_stop
1298 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
1301 if (thread
->last_resume_kind
!= resume_stop
1302 && (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1303 || lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
))
1305 struct thread_info
*saved_thread
;
1309 gdb_assert (lp
->last_status
!= 0);
1313 saved_thread
= current_thread
;
1314 current_thread
= thread
;
1316 if (pc
!= lp
->stop_pc
)
1319 debug_printf ("PC of %ld changed\n",
1324 #if !USE_SIGTRAP_SIGINFO
1325 else if (lp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
1326 && !(*the_low_target
.breakpoint_at
) (pc
))
1329 debug_printf ("previous SW breakpoint of %ld gone\n",
1333 else if (lp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
1334 && !hardware_breakpoint_inserted_here (pc
))
1337 debug_printf ("previous HW breakpoint of %ld gone\n",
1343 current_thread
= saved_thread
;
1348 debug_printf ("discarding pending breakpoint status\n");
1349 lp
->status_pending_p
= 0;
1357 /* Return 1 if this lwp has an interesting status pending. */
1359 status_pending_p_callback (struct inferior_list_entry
*entry
, void *arg
)
1361 struct thread_info
*thread
= (struct thread_info
*) entry
;
1362 struct lwp_info
*lp
= get_thread_lwp (thread
);
1363 ptid_t ptid
= * (ptid_t
*) arg
;
1365 /* Check if we're only interested in events from a specific process
1366 or a specific LWP. */
1367 if (!ptid_match (ptid_of (thread
), ptid
))
1370 if (lp
->status_pending_p
1371 && !thread_still_has_status_pending_p (thread
))
1373 linux_resume_one_lwp (lp
, lp
->stepping
, GDB_SIGNAL_0
, NULL
);
1377 return lp
->status_pending_p
;
1381 same_lwp (struct inferior_list_entry
*entry
, void *data
)
1383 ptid_t ptid
= *(ptid_t
*) data
;
1386 if (ptid_get_lwp (ptid
) != 0)
1387 lwp
= ptid_get_lwp (ptid
);
1389 lwp
= ptid_get_pid (ptid
);
1391 if (ptid_get_lwp (entry
->id
) == lwp
)
1398 find_lwp_pid (ptid_t ptid
)
1400 struct inferior_list_entry
*thread
1401 = find_inferior (&all_threads
, same_lwp
, &ptid
);
1406 return get_thread_lwp ((struct thread_info
*) thread
);
1409 /* Return the number of known LWPs in the tgid given by PID. */
1414 struct inferior_list_entry
*inf
, *tmp
;
1417 ALL_INFERIORS (&all_threads
, inf
, tmp
)
1419 if (ptid_get_pid (inf
->id
) == pid
)
1426 /* The arguments passed to iterate_over_lwps. */
1428 struct iterate_over_lwps_args
1430 /* The FILTER argument passed to iterate_over_lwps. */
1433 /* The CALLBACK argument passed to iterate_over_lwps. */
1434 iterate_over_lwps_ftype
*callback
;
1436 /* The DATA argument passed to iterate_over_lwps. */
1440 /* Callback for find_inferior used by iterate_over_lwps to filter
1441 calls to the callback supplied to that function. Returning a
1442 nonzero value causes find_inferiors to stop iterating and return
1443 the current inferior_list_entry. Returning zero indicates that
1444 find_inferiors should continue iterating. */
1447 iterate_over_lwps_filter (struct inferior_list_entry
*entry
, void *args_p
)
1449 struct iterate_over_lwps_args
*args
1450 = (struct iterate_over_lwps_args
*) args_p
;
1452 if (ptid_match (entry
->id
, args
->filter
))
1454 struct thread_info
*thr
= (struct thread_info
*) entry
;
1455 struct lwp_info
*lwp
= get_thread_lwp (thr
);
1457 return (*args
->callback
) (lwp
, args
->data
);
1463 /* See nat/linux-nat.h. */
1466 iterate_over_lwps (ptid_t filter
,
1467 iterate_over_lwps_ftype callback
,
1470 struct iterate_over_lwps_args args
= {filter
, callback
, data
};
1471 struct inferior_list_entry
*entry
;
1473 entry
= find_inferior (&all_threads
, iterate_over_lwps_filter
, &args
);
1477 return get_thread_lwp ((struct thread_info
*) entry
);
1480 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1481 their exits until all other threads in the group have exited. */
1484 check_zombie_leaders (void)
1486 struct process_info
*proc
, *tmp
;
1488 ALL_PROCESSES (proc
, tmp
)
1490 pid_t leader_pid
= pid_of (proc
);
1491 struct lwp_info
*leader_lp
;
1493 leader_lp
= find_lwp_pid (pid_to_ptid (leader_pid
));
1496 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1497 "num_lwps=%d, zombie=%d\n",
1498 leader_pid
, leader_lp
!= NULL
, num_lwps (leader_pid
),
1499 linux_proc_pid_is_zombie (leader_pid
));
1501 if (leader_lp
!= NULL
1502 /* Check if there are other threads in the group, as we may
1503 have raced with the inferior simply exiting. */
1504 && !last_thread_of_process_p (leader_pid
)
1505 && linux_proc_pid_is_zombie (leader_pid
))
1507 /* A leader zombie can mean one of two things:
1509 - It exited, and there's an exit status pending
1510 available, or only the leader exited (not the whole
1511 program). In the latter case, we can't waitpid the
1512 leader's exit status until all other threads are gone.
1514 - There are 3 or more threads in the group, and a thread
1515 other than the leader exec'd. On an exec, the Linux
1516 kernel destroys all other threads (except the execing
1517 one) in the thread group, and resets the execing thread's
1518 tid to the tgid. No exit notification is sent for the
1519 execing thread -- from the ptracer's perspective, it
1520 appears as though the execing thread just vanishes.
1521 Until we reap all other threads except the leader and the
1522 execing thread, the leader will be zombie, and the
1523 execing thread will be in `D (disc sleep)'. As soon as
1524 all other threads are reaped, the execing thread changes
1525 it's tid to the tgid, and the previous (zombie) leader
1526 vanishes, giving place to the "new" leader. We could try
1527 distinguishing the exit and exec cases, by waiting once
1528 more, and seeing if something comes out, but it doesn't
1529 sound useful. The previous leader _does_ go away, and
1530 we'll re-add the new one once we see the exec event
1531 (which is just the same as what would happen if the
1532 previous leader did exit voluntarily before some other
1537 "CZL: Thread group leader %d zombie "
1538 "(it exited, or another thread execd).\n",
1541 delete_lwp (leader_lp
);
1546 /* Callback for `find_inferior'. Returns the first LWP that is not
1547 stopped. ARG is a PTID filter. */
1550 not_stopped_callback (struct inferior_list_entry
*entry
, void *arg
)
1552 struct thread_info
*thr
= (struct thread_info
*) entry
;
1553 struct lwp_info
*lwp
;
1554 ptid_t filter
= *(ptid_t
*) arg
;
1556 if (!ptid_match (ptid_of (thr
), filter
))
1559 lwp
= get_thread_lwp (thr
);
1566 /* This function should only be called if the LWP got a SIGTRAP.
1568 Handle any tracepoint steps or hits. Return true if a tracepoint
1569 event was handled, 0 otherwise. */
1572 handle_tracepoints (struct lwp_info
*lwp
)
1574 struct thread_info
*tinfo
= get_lwp_thread (lwp
);
1575 int tpoint_related_event
= 0;
1577 gdb_assert (lwp
->suspended
== 0);
1579 /* If this tracepoint hit causes a tracing stop, we'll immediately
1580 uninsert tracepoints. To do this, we temporarily pause all
1581 threads, unpatch away, and then unpause threads. We need to make
1582 sure the unpausing doesn't resume LWP too. */
1585 /* And we need to be sure that any all-threads-stopping doesn't try
1586 to move threads out of the jump pads, as it could deadlock the
1587 inferior (LWP could be in the jump pad, maybe even holding the
1590 /* Do any necessary step collect actions. */
1591 tpoint_related_event
|= tracepoint_finished_step (tinfo
, lwp
->stop_pc
);
1593 tpoint_related_event
|= handle_tracepoint_bkpts (tinfo
, lwp
->stop_pc
);
1595 /* See if we just hit a tracepoint and do its main collect
1597 tpoint_related_event
|= tracepoint_was_hit (tinfo
, lwp
->stop_pc
);
1601 gdb_assert (lwp
->suspended
== 0);
1602 gdb_assert (!stabilizing_threads
|| lwp
->collecting_fast_tracepoint
);
1604 if (tpoint_related_event
)
1607 debug_printf ("got a tracepoint event\n");
1614 /* Convenience wrapper. Returns true if LWP is presently collecting a
1618 linux_fast_tracepoint_collecting (struct lwp_info
*lwp
,
1619 struct fast_tpoint_collect_status
*status
)
1621 CORE_ADDR thread_area
;
1622 struct thread_info
*thread
= get_lwp_thread (lwp
);
1624 if (the_low_target
.get_thread_area
== NULL
)
1627 /* Get the thread area address. This is used to recognize which
1628 thread is which when tracing with the in-process agent library.
1629 We don't read anything from the address, and treat it as opaque;
1630 it's the address itself that we assume is unique per-thread. */
1631 if ((*the_low_target
.get_thread_area
) (lwpid_of (thread
), &thread_area
) == -1)
1634 return fast_tracepoint_collecting (thread_area
, lwp
->stop_pc
, status
);
1637 /* The reason we resume in the caller, is because we want to be able
1638 to pass lwp->status_pending as WSTAT, and we need to clear
1639 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1640 refuses to resume. */
1643 maybe_move_out_of_jump_pad (struct lwp_info
*lwp
, int *wstat
)
1645 struct thread_info
*saved_thread
;
1647 saved_thread
= current_thread
;
1648 current_thread
= get_lwp_thread (lwp
);
1651 || (WIFSTOPPED (*wstat
) && WSTOPSIG (*wstat
) != SIGTRAP
))
1652 && supports_fast_tracepoints ()
1653 && agent_loaded_p ())
1655 struct fast_tpoint_collect_status status
;
1659 debug_printf ("Checking whether LWP %ld needs to move out of the "
1661 lwpid_of (current_thread
));
1663 r
= linux_fast_tracepoint_collecting (lwp
, &status
);
1666 || (WSTOPSIG (*wstat
) != SIGILL
1667 && WSTOPSIG (*wstat
) != SIGFPE
1668 && WSTOPSIG (*wstat
) != SIGSEGV
1669 && WSTOPSIG (*wstat
) != SIGBUS
))
1671 lwp
->collecting_fast_tracepoint
= r
;
1675 if (r
== 1 && lwp
->exit_jump_pad_bkpt
== NULL
)
1677 /* Haven't executed the original instruction yet.
1678 Set breakpoint there, and wait till it's hit,
1679 then single-step until exiting the jump pad. */
1680 lwp
->exit_jump_pad_bkpt
1681 = set_breakpoint_at (status
.adjusted_insn_addr
, NULL
);
1685 debug_printf ("Checking whether LWP %ld needs to move out of "
1686 "the jump pad...it does\n",
1687 lwpid_of (current_thread
));
1688 current_thread
= saved_thread
;
1695 /* If we get a synchronous signal while collecting, *and*
1696 while executing the (relocated) original instruction,
1697 reset the PC to point at the tpoint address, before
1698 reporting to GDB. Otherwise, it's an IPA lib bug: just
1699 report the signal to GDB, and pray for the best. */
1701 lwp
->collecting_fast_tracepoint
= 0;
1704 && (status
.adjusted_insn_addr
<= lwp
->stop_pc
1705 && lwp
->stop_pc
< status
.adjusted_insn_addr_end
))
1708 struct regcache
*regcache
;
1710 /* The si_addr on a few signals references the address
1711 of the faulting instruction. Adjust that as
1713 if ((WSTOPSIG (*wstat
) == SIGILL
1714 || WSTOPSIG (*wstat
) == SIGFPE
1715 || WSTOPSIG (*wstat
) == SIGBUS
1716 || WSTOPSIG (*wstat
) == SIGSEGV
)
1717 && ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
1718 (PTRACE_TYPE_ARG3
) 0, &info
) == 0
1719 /* Final check just to make sure we don't clobber
1720 the siginfo of non-kernel-sent signals. */
1721 && (uintptr_t) info
.si_addr
== lwp
->stop_pc
)
1723 info
.si_addr
= (void *) (uintptr_t) status
.tpoint_addr
;
1724 ptrace (PTRACE_SETSIGINFO
, lwpid_of (current_thread
),
1725 (PTRACE_TYPE_ARG3
) 0, &info
);
1728 regcache
= get_thread_regcache (current_thread
, 1);
1729 (*the_low_target
.set_pc
) (regcache
, status
.tpoint_addr
);
1730 lwp
->stop_pc
= status
.tpoint_addr
;
1732 /* Cancel any fast tracepoint lock this thread was
1734 force_unlock_trace_buffer ();
1737 if (lwp
->exit_jump_pad_bkpt
!= NULL
)
1740 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1741 "stopping all threads momentarily.\n");
1743 stop_all_lwps (1, lwp
);
1745 delete_breakpoint (lwp
->exit_jump_pad_bkpt
);
1746 lwp
->exit_jump_pad_bkpt
= NULL
;
1748 unstop_all_lwps (1, lwp
);
1750 gdb_assert (lwp
->suspended
>= 0);
1756 debug_printf ("Checking whether LWP %ld needs to move out of the "
1758 lwpid_of (current_thread
));
1760 current_thread
= saved_thread
;
1764 /* Enqueue one signal in the "signals to report later when out of the
1768 enqueue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
1770 struct pending_signals
*p_sig
;
1771 struct thread_info
*thread
= get_lwp_thread (lwp
);
1774 debug_printf ("Deferring signal %d for LWP %ld.\n",
1775 WSTOPSIG (*wstat
), lwpid_of (thread
));
1779 struct pending_signals
*sig
;
1781 for (sig
= lwp
->pending_signals_to_report
;
1784 debug_printf (" Already queued %d\n",
1787 debug_printf (" (no more currently queued signals)\n");
1790 /* Don't enqueue non-RT signals if they are already in the deferred
1791 queue. (SIGSTOP being the easiest signal to see ending up here
1793 if (WSTOPSIG (*wstat
) < __SIGRTMIN
)
1795 struct pending_signals
*sig
;
1797 for (sig
= lwp
->pending_signals_to_report
;
1801 if (sig
->signal
== WSTOPSIG (*wstat
))
1804 debug_printf ("Not requeuing already queued non-RT signal %d"
1813 p_sig
= xmalloc (sizeof (*p_sig
));
1814 p_sig
->prev
= lwp
->pending_signals_to_report
;
1815 p_sig
->signal
= WSTOPSIG (*wstat
);
1816 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
1817 ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
1820 lwp
->pending_signals_to_report
= p_sig
;
1823 /* Dequeue one signal from the "signals to report later when out of
1824 the jump pad" list. */
1827 dequeue_one_deferred_signal (struct lwp_info
*lwp
, int *wstat
)
1829 struct thread_info
*thread
= get_lwp_thread (lwp
);
1831 if (lwp
->pending_signals_to_report
!= NULL
)
1833 struct pending_signals
**p_sig
;
1835 p_sig
= &lwp
->pending_signals_to_report
;
1836 while ((*p_sig
)->prev
!= NULL
)
1837 p_sig
= &(*p_sig
)->prev
;
1839 *wstat
= W_STOPCODE ((*p_sig
)->signal
);
1840 if ((*p_sig
)->info
.si_signo
!= 0)
1841 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
1847 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1848 WSTOPSIG (*wstat
), lwpid_of (thread
));
1852 struct pending_signals
*sig
;
1854 for (sig
= lwp
->pending_signals_to_report
;
1857 debug_printf (" Still queued %d\n",
1860 debug_printf (" (no more queued signals)\n");
1869 /* Fetch the possibly triggered data watchpoint info and store it in
1872 On some archs, like x86, that use debug registers to set
1873 watchpoints, it's possible that the way to know which watched
1874 address trapped, is to check the register that is used to select
1875 which address to watch. Problem is, between setting the watchpoint
1876 and reading back which data address trapped, the user may change
1877 the set of watchpoints, and, as a consequence, GDB changes the
1878 debug registers in the inferior. To avoid reading back a stale
1879 stopped-data-address when that happens, we cache in LP the fact
1880 that a watchpoint trapped, and the corresponding data address, as
1881 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1882 registers meanwhile, we have the cached data we can rely on. */
1885 check_stopped_by_watchpoint (struct lwp_info
*child
)
1887 if (the_low_target
.stopped_by_watchpoint
!= NULL
)
1889 struct thread_info
*saved_thread
;
1891 saved_thread
= current_thread
;
1892 current_thread
= get_lwp_thread (child
);
1894 if (the_low_target
.stopped_by_watchpoint ())
1896 child
->stop_reason
= TARGET_STOPPED_BY_WATCHPOINT
;
1898 if (the_low_target
.stopped_data_address
!= NULL
)
1899 child
->stopped_data_address
1900 = the_low_target
.stopped_data_address ();
1902 child
->stopped_data_address
= 0;
1905 current_thread
= saved_thread
;
1908 return child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
1911 /* Do low-level handling of the event, and check if we should go on
1912 and pass it to caller code. Return the affected lwp if we are, or
1915 static struct lwp_info
*
1916 linux_low_filter_event (int lwpid
, int wstat
)
1918 struct lwp_info
*child
;
1919 struct thread_info
*thread
;
1920 int have_stop_pc
= 0;
1922 child
= find_lwp_pid (pid_to_ptid (lwpid
));
1924 /* If we didn't find a process, one of two things presumably happened:
1925 - A process we started and then detached from has exited. Ignore it.
1926 - A process we are controlling has forked and the new child's stop
1927 was reported to us by the kernel. Save its PID. */
1928 if (child
== NULL
&& WIFSTOPPED (wstat
))
1930 add_to_pid_list (&stopped_pids
, lwpid
, wstat
);
1933 else if (child
== NULL
)
1936 thread
= get_lwp_thread (child
);
1940 child
->last_status
= wstat
;
1942 /* Check if the thread has exited. */
1943 if ((WIFEXITED (wstat
) || WIFSIGNALED (wstat
)))
1946 debug_printf ("LLFE: %d exited.\n", lwpid
);
1947 if (num_lwps (pid_of (thread
)) > 1)
1950 /* If there is at least one more LWP, then the exit signal was
1951 not the end of the debugged application and should be
1958 /* This was the last lwp in the process. Since events are
1959 serialized to GDB core, and we can't report this one
1960 right now, but GDB core and the other target layers will
1961 want to be notified about the exit code/signal, leave the
1962 status pending for the next time we're able to report
1964 mark_lwp_dead (child
, wstat
);
1969 gdb_assert (WIFSTOPPED (wstat
));
1971 if (WIFSTOPPED (wstat
))
1973 struct process_info
*proc
;
1975 /* Architecture-specific setup after inferior is running. This
1976 needs to happen after we have attached to the inferior and it
1977 is stopped for the first time, but before we access any
1978 inferior registers. */
1979 proc
= find_process_pid (pid_of (thread
));
1980 if (proc
->priv
->new_inferior
)
1982 struct thread_info
*saved_thread
;
1984 saved_thread
= current_thread
;
1985 current_thread
= thread
;
1987 the_low_target
.arch_setup ();
1989 current_thread
= saved_thread
;
1991 proc
->priv
->new_inferior
= 0;
1995 if (WIFSTOPPED (wstat
) && child
->must_set_ptrace_flags
)
1997 struct process_info
*proc
= find_process_pid (pid_of (thread
));
1999 linux_enable_event_reporting (lwpid
, proc
->attached
);
2000 child
->must_set_ptrace_flags
= 0;
2003 /* Be careful to not overwrite stop_pc until
2004 check_stopped_by_breakpoint is called. */
2005 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGTRAP
2006 && linux_is_extended_waitstatus (wstat
))
2008 child
->stop_pc
= get_pc (child
);
2009 handle_extended_wait (child
, wstat
);
2013 /* Check first whether this was a SW/HW breakpoint before checking
2014 watchpoints, because at least s390 can't tell the data address of
2015 hardware watchpoint hits, and returns stopped-by-watchpoint as
2016 long as there's a watchpoint set. */
2017 if (WIFSTOPPED (wstat
) && linux_wstatus_maybe_breakpoint (wstat
))
2019 if (check_stopped_by_breakpoint (child
))
2023 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2024 or hardware watchpoint. Check which is which if we got
2025 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2026 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGTRAP
2027 && (child
->stop_reason
== TARGET_STOPPED_BY_NO_REASON
2028 || child
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
))
2029 check_stopped_by_watchpoint (child
);
2032 child
->stop_pc
= get_pc (child
);
2034 if (WIFSTOPPED (wstat
) && WSTOPSIG (wstat
) == SIGSTOP
2035 && child
->stop_expected
)
2038 debug_printf ("Expected stop.\n");
2039 child
->stop_expected
= 0;
2041 if (thread
->last_resume_kind
== resume_stop
)
2043 /* We want to report the stop to the core. Treat the
2044 SIGSTOP as a normal event. */
2046 else if (stopping_threads
!= NOT_STOPPING_THREADS
)
2048 /* Stopping threads. We don't want this SIGSTOP to end up
2054 /* Filter out the event. */
2055 linux_resume_one_lwp (child
, child
->stepping
, 0, NULL
);
2060 child
->status_pending_p
= 1;
2061 child
->status_pending
= wstat
;
2065 /* Resume LWPs that are currently stopped without any pending status
2066 to report, but are resumed from the core's perspective. */
2069 resume_stopped_resumed_lwps (struct inferior_list_entry
*entry
)
2071 struct thread_info
*thread
= (struct thread_info
*) entry
;
2072 struct lwp_info
*lp
= get_thread_lwp (thread
);
2075 && !lp
->status_pending_p
2076 && thread
->last_resume_kind
!= resume_stop
2077 && thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
)
2079 int step
= thread
->last_resume_kind
== resume_step
;
2082 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2083 target_pid_to_str (ptid_of (thread
)),
2084 paddress (lp
->stop_pc
),
2087 linux_resume_one_lwp (lp
, step
, GDB_SIGNAL_0
, NULL
);
2091 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2092 match FILTER_PTID (leaving others pending). The PTIDs can be:
2093 minus_one_ptid, to specify any child; a pid PTID, specifying all
2094 lwps of a thread group; or a PTID representing a single lwp. Store
2095 the stop status through the status pointer WSTAT. OPTIONS is
2096 passed to the waitpid call. Return 0 if no event was found and
2097 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2098 was found. Return the PID of the stopped child otherwise. */
2101 linux_wait_for_event_filtered (ptid_t wait_ptid
, ptid_t filter_ptid
,
2102 int *wstatp
, int options
)
2104 struct thread_info
*event_thread
;
2105 struct lwp_info
*event_child
, *requested_child
;
2106 sigset_t block_mask
, prev_mask
;
2109 /* N.B. event_thread points to the thread_info struct that contains
2110 event_child. Keep them in sync. */
2111 event_thread
= NULL
;
2113 requested_child
= NULL
;
2115 /* Check for a lwp with a pending status. */
2117 if (ptid_equal (filter_ptid
, minus_one_ptid
) || ptid_is_pid (filter_ptid
))
2119 event_thread
= (struct thread_info
*)
2120 find_inferior (&all_threads
, status_pending_p_callback
, &filter_ptid
);
2121 if (event_thread
!= NULL
)
2122 event_child
= get_thread_lwp (event_thread
);
2123 if (debug_threads
&& event_thread
)
2124 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread
));
2126 else if (!ptid_equal (filter_ptid
, null_ptid
))
2128 requested_child
= find_lwp_pid (filter_ptid
);
2130 if (stopping_threads
== NOT_STOPPING_THREADS
2131 && requested_child
->status_pending_p
2132 && requested_child
->collecting_fast_tracepoint
)
2134 enqueue_one_deferred_signal (requested_child
,
2135 &requested_child
->status_pending
);
2136 requested_child
->status_pending_p
= 0;
2137 requested_child
->status_pending
= 0;
2138 linux_resume_one_lwp (requested_child
, 0, 0, NULL
);
2141 if (requested_child
->suspended
2142 && requested_child
->status_pending_p
)
2144 internal_error (__FILE__
, __LINE__
,
2145 "requesting an event out of a"
2146 " suspended child?");
2149 if (requested_child
->status_pending_p
)
2151 event_child
= requested_child
;
2152 event_thread
= get_lwp_thread (event_child
);
2156 if (event_child
!= NULL
)
2159 debug_printf ("Got an event from pending child %ld (%04x)\n",
2160 lwpid_of (event_thread
), event_child
->status_pending
);
2161 *wstatp
= event_child
->status_pending
;
2162 event_child
->status_pending_p
= 0;
2163 event_child
->status_pending
= 0;
2164 current_thread
= event_thread
;
2165 return lwpid_of (event_thread
);
2168 /* But if we don't find a pending event, we'll have to wait.
2170 We only enter this loop if no process has a pending wait status.
2171 Thus any action taken in response to a wait status inside this
2172 loop is responding as soon as we detect the status, not after any
2175 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2176 all signals while here. */
2177 sigfillset (&block_mask
);
2178 sigprocmask (SIG_BLOCK
, &block_mask
, &prev_mask
);
2180 /* Always pull all events out of the kernel. We'll randomly select
2181 an event LWP out of all that have events, to prevent
2183 while (event_child
== NULL
)
2187 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2190 - If the thread group leader exits while other threads in the
2191 thread group still exist, waitpid(TGID, ...) hangs. That
2192 waitpid won't return an exit status until the other threads
2193 in the group are reaped.
2195 - When a non-leader thread execs, that thread just vanishes
2196 without reporting an exit (so we'd hang if we waited for it
2197 explicitly in that case). The exec event is reported to
2198 the TGID pid (although we don't currently enable exec
2201 ret
= my_waitpid (-1, wstatp
, options
| WNOHANG
);
2204 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2205 ret
, errno
? strerror (errno
) : "ERRNO-OK");
2211 debug_printf ("LLW: waitpid %ld received %s\n",
2212 (long) ret
, status_to_str (*wstatp
));
2215 /* Filter all events. IOW, leave all events pending. We'll
2216 randomly select an event LWP out of all that have events
2218 linux_low_filter_event (ret
, *wstatp
);
2219 /* Retry until nothing comes out of waitpid. A single
2220 SIGCHLD can indicate more than one child stopped. */
2224 /* Now that we've pulled all events out of the kernel, resume
2225 LWPs that don't have an interesting event to report. */
2226 if (stopping_threads
== NOT_STOPPING_THREADS
)
2227 for_each_inferior (&all_threads
, resume_stopped_resumed_lwps
);
2229 /* ... and find an LWP with a status to report to the core, if
2231 event_thread
= (struct thread_info
*)
2232 find_inferior (&all_threads
, status_pending_p_callback
, &filter_ptid
);
2233 if (event_thread
!= NULL
)
2235 event_child
= get_thread_lwp (event_thread
);
2236 *wstatp
= event_child
->status_pending
;
2237 event_child
->status_pending_p
= 0;
2238 event_child
->status_pending
= 0;
2242 /* Check for zombie thread group leaders. Those can't be reaped
2243 until all other threads in the thread group are. */
2244 check_zombie_leaders ();
2246 /* If there are no resumed children left in the set of LWPs we
2247 want to wait for, bail. We can't just block in
2248 waitpid/sigsuspend, because lwps might have been left stopped
2249 in trace-stop state, and we'd be stuck forever waiting for
2250 their status to change (which would only happen if we resumed
2251 them). Even if WNOHANG is set, this return code is preferred
2252 over 0 (below), as it is more detailed. */
2253 if ((find_inferior (&all_threads
,
2254 not_stopped_callback
,
2255 &wait_ptid
) == NULL
))
2258 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2259 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2263 /* No interesting event to report to the caller. */
2264 if ((options
& WNOHANG
))
2267 debug_printf ("WNOHANG set, no event found\n");
2269 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2273 /* Block until we get an event reported with SIGCHLD. */
2275 debug_printf ("sigsuspend'ing\n");
2277 sigsuspend (&prev_mask
);
2278 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2282 sigprocmask (SIG_SETMASK
, &prev_mask
, NULL
);
2284 current_thread
= event_thread
;
2286 /* Check for thread exit. */
2287 if (! WIFSTOPPED (*wstatp
))
2289 gdb_assert (last_thread_of_process_p (pid_of (event_thread
)));
2292 debug_printf ("LWP %d is the last lwp of process. "
2293 "Process %ld exiting.\n",
2294 pid_of (event_thread
), lwpid_of (event_thread
));
2295 return lwpid_of (event_thread
);
2298 return lwpid_of (event_thread
);
2301 /* Wait for an event from child(ren) PTID. PTIDs can be:
2302 minus_one_ptid, to specify any child; a pid PTID, specifying all
2303 lwps of a thread group; or a PTID representing a single lwp. Store
2304 the stop status through the status pointer WSTAT. OPTIONS is
2305 passed to the waitpid call. Return 0 if no event was found and
2306 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2307 was found. Return the PID of the stopped child otherwise. */
2310 linux_wait_for_event (ptid_t ptid
, int *wstatp
, int options
)
2312 return linux_wait_for_event_filtered (ptid
, ptid
, wstatp
, options
);
2315 /* Count the LWP's that have had events. */
2318 count_events_callback (struct inferior_list_entry
*entry
, void *data
)
2320 struct thread_info
*thread
= (struct thread_info
*) entry
;
2321 struct lwp_info
*lp
= get_thread_lwp (thread
);
2324 gdb_assert (count
!= NULL
);
2326 /* Count only resumed LWPs that have an event pending. */
2327 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2328 && lp
->status_pending_p
)
2334 /* Select the LWP (if any) that is currently being single-stepped. */
2337 select_singlestep_lwp_callback (struct inferior_list_entry
*entry
, void *data
)
2339 struct thread_info
*thread
= (struct thread_info
*) entry
;
2340 struct lwp_info
*lp
= get_thread_lwp (thread
);
2342 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2343 && thread
->last_resume_kind
== resume_step
2344 && lp
->status_pending_p
)
2350 /* Select the Nth LWP that has had an event. */
2353 select_event_lwp_callback (struct inferior_list_entry
*entry
, void *data
)
2355 struct thread_info
*thread
= (struct thread_info
*) entry
;
2356 struct lwp_info
*lp
= get_thread_lwp (thread
);
2357 int *selector
= data
;
2359 gdb_assert (selector
!= NULL
);
2361 /* Select only resumed LWPs that have an event pending. */
2362 if (thread
->last_status
.kind
== TARGET_WAITKIND_IGNORE
2363 && lp
->status_pending_p
)
2364 if ((*selector
)-- == 0)
2370 /* Select one LWP out of those that have events pending. */
2373 select_event_lwp (struct lwp_info
**orig_lp
)
2376 int random_selector
;
2377 struct thread_info
*event_thread
= NULL
;
2379 /* In all-stop, give preference to the LWP that is being
2380 single-stepped. There will be at most one, and it's the LWP that
2381 the core is most interested in. If we didn't do this, then we'd
2382 have to handle pending step SIGTRAPs somehow in case the core
2383 later continues the previously-stepped thread, otherwise we'd
2384 report the pending SIGTRAP, and the core, not having stepped the
2385 thread, wouldn't understand what the trap was for, and therefore
2386 would report it to the user as a random signal. */
2390 = (struct thread_info
*) find_inferior (&all_threads
,
2391 select_singlestep_lwp_callback
,
2393 if (event_thread
!= NULL
)
2396 debug_printf ("SEL: Select single-step %s\n",
2397 target_pid_to_str (ptid_of (event_thread
)));
2400 if (event_thread
== NULL
)
2402 /* No single-stepping LWP. Select one at random, out of those
2403 which have had events. */
2405 /* First see how many events we have. */
2406 find_inferior (&all_threads
, count_events_callback
, &num_events
);
2407 gdb_assert (num_events
> 0);
2409 /* Now randomly pick a LWP out of those that have had
2411 random_selector
= (int)
2412 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
2414 if (debug_threads
&& num_events
> 1)
2415 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2416 num_events
, random_selector
);
2419 = (struct thread_info
*) find_inferior (&all_threads
,
2420 select_event_lwp_callback
,
2424 if (event_thread
!= NULL
)
2426 struct lwp_info
*event_lp
= get_thread_lwp (event_thread
);
2428 /* Switch the event LWP. */
2429 *orig_lp
= event_lp
;
2433 /* Decrement the suspend count of an LWP. */
2436 unsuspend_one_lwp (struct inferior_list_entry
*entry
, void *except
)
2438 struct thread_info
*thread
= (struct thread_info
*) entry
;
2439 struct lwp_info
*lwp
= get_thread_lwp (thread
);
2441 /* Ignore EXCEPT. */
2447 gdb_assert (lwp
->suspended
>= 0);
2451 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2455 unsuspend_all_lwps (struct lwp_info
*except
)
2457 find_inferior (&all_threads
, unsuspend_one_lwp
, except
);
2460 static void move_out_of_jump_pad_callback (struct inferior_list_entry
*entry
);
2461 static int stuck_in_jump_pad_callback (struct inferior_list_entry
*entry
,
2463 static int lwp_running (struct inferior_list_entry
*entry
, void *data
);
2464 static ptid_t
linux_wait_1 (ptid_t ptid
,
2465 struct target_waitstatus
*ourstatus
,
2466 int target_options
);
2468 /* Stabilize threads (move out of jump pads).
2470 If a thread is midway collecting a fast tracepoint, we need to
2471 finish the collection and move it out of the jump pad before
2472 reporting the signal.
2474 This avoids recursion while collecting (when a signal arrives
2475 midway, and the signal handler itself collects), which would trash
2476 the trace buffer. In case the user set a breakpoint in a signal
2477 handler, this avoids the backtrace showing the jump pad, etc..
2478 Most importantly, there are certain things we can't do safely if
2479 threads are stopped in a jump pad (or in its callee's). For
2482 - starting a new trace run. A thread still collecting the
2483 previous run, could trash the trace buffer when resumed. The trace
2484 buffer control structures would have been reset but the thread had
2485 no way to tell. The thread could even midway memcpy'ing to the
2486 buffer, which would mean that when resumed, it would clobber the
2487 trace buffer that had been set for a new run.
2489 - we can't rewrite/reuse the jump pads for new tracepoints
2490 safely. Say you do tstart while a thread is stopped midway while
2491 collecting. When the thread is later resumed, it finishes the
2492 collection, and returns to the jump pad, to execute the original
2493 instruction that was under the tracepoint jump at the time the
2494 older run had been started. If the jump pad had been rewritten
2495 since for something else in the new run, the thread would now
2496 execute the wrong / random instructions. */
2499 linux_stabilize_threads (void)
2501 struct thread_info
*saved_thread
;
2502 struct thread_info
*thread_stuck
;
2505 = (struct thread_info
*) find_inferior (&all_threads
,
2506 stuck_in_jump_pad_callback
,
2508 if (thread_stuck
!= NULL
)
2511 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2512 lwpid_of (thread_stuck
));
2516 saved_thread
= current_thread
;
2518 stabilizing_threads
= 1;
2521 for_each_inferior (&all_threads
, move_out_of_jump_pad_callback
);
2523 /* Loop until all are stopped out of the jump pads. */
2524 while (find_inferior (&all_threads
, lwp_running
, NULL
) != NULL
)
2526 struct target_waitstatus ourstatus
;
2527 struct lwp_info
*lwp
;
2530 /* Note that we go through the full wait even loop. While
2531 moving threads out of jump pad, we need to be able to step
2532 over internal breakpoints and such. */
2533 linux_wait_1 (minus_one_ptid
, &ourstatus
, 0);
2535 if (ourstatus
.kind
== TARGET_WAITKIND_STOPPED
)
2537 lwp
= get_thread_lwp (current_thread
);
2542 if (ourstatus
.value
.sig
!= GDB_SIGNAL_0
2543 || current_thread
->last_resume_kind
== resume_stop
)
2545 wstat
= W_STOPCODE (gdb_signal_to_host (ourstatus
.value
.sig
));
2546 enqueue_one_deferred_signal (lwp
, &wstat
);
2551 find_inferior (&all_threads
, unsuspend_one_lwp
, NULL
);
2553 stabilizing_threads
= 0;
2555 current_thread
= saved_thread
;
2560 = (struct thread_info
*) find_inferior (&all_threads
,
2561 stuck_in_jump_pad_callback
,
2563 if (thread_stuck
!= NULL
)
2564 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2565 lwpid_of (thread_stuck
));
2569 static void async_file_mark (void);
2571 /* Convenience function that is called when the kernel reports an
2572 event that is not passed out to GDB. */
2575 ignore_event (struct target_waitstatus
*ourstatus
)
2577 /* If we got an event, there may still be others, as a single
2578 SIGCHLD can indicate more than one child stopped. This forces
2579 another target_wait call. */
2582 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2586 /* Wait for process, returns status. */
2589 linux_wait_1 (ptid_t ptid
,
2590 struct target_waitstatus
*ourstatus
, int target_options
)
2593 struct lwp_info
*event_child
;
2596 int step_over_finished
;
2597 int bp_explains_trap
;
2598 int maybe_internal_trap
;
2606 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid
));
2609 /* Translate generic target options into linux options. */
2611 if (target_options
& TARGET_WNOHANG
)
2614 bp_explains_trap
= 0;
2617 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2619 if (ptid_equal (step_over_bkpt
, null_ptid
))
2620 pid
= linux_wait_for_event (ptid
, &w
, options
);
2624 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2625 target_pid_to_str (step_over_bkpt
));
2626 pid
= linux_wait_for_event (step_over_bkpt
, &w
, options
& ~WNOHANG
);
2631 gdb_assert (target_options
& TARGET_WNOHANG
);
2635 debug_printf ("linux_wait_1 ret = null_ptid, "
2636 "TARGET_WAITKIND_IGNORE\n");
2640 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2647 debug_printf ("linux_wait_1 ret = null_ptid, "
2648 "TARGET_WAITKIND_NO_RESUMED\n");
2652 ourstatus
->kind
= TARGET_WAITKIND_NO_RESUMED
;
2656 event_child
= get_thread_lwp (current_thread
);
2658 /* linux_wait_for_event only returns an exit status for the last
2659 child of a process. Report it. */
2660 if (WIFEXITED (w
) || WIFSIGNALED (w
))
2664 ourstatus
->kind
= TARGET_WAITKIND_EXITED
;
2665 ourstatus
->value
.integer
= WEXITSTATUS (w
);
2669 debug_printf ("linux_wait_1 ret = %s, exited with "
2671 target_pid_to_str (ptid_of (current_thread
)),
2678 ourstatus
->kind
= TARGET_WAITKIND_SIGNALLED
;
2679 ourstatus
->value
.sig
= gdb_signal_from_host (WTERMSIG (w
));
2683 debug_printf ("linux_wait_1 ret = %s, terminated with "
2685 target_pid_to_str (ptid_of (current_thread
)),
2691 return ptid_of (current_thread
);
2694 /* If step-over executes a breakpoint instruction, it means a
2695 gdb/gdbserver breakpoint had been planted on top of a permanent
2696 breakpoint. The PC has been adjusted by
2697 check_stopped_by_breakpoint to point at the breakpoint address.
2698 Advance the PC manually past the breakpoint, otherwise the
2699 program would keep trapping the permanent breakpoint forever. */
2700 if (!ptid_equal (step_over_bkpt
, null_ptid
)
2701 && event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
)
2703 unsigned int increment_pc
= the_low_target
.breakpoint_len
;
2707 debug_printf ("step-over for %s executed software breakpoint\n",
2708 target_pid_to_str (ptid_of (current_thread
)));
2711 if (increment_pc
!= 0)
2713 struct regcache
*regcache
2714 = get_thread_regcache (current_thread
, 1);
2716 event_child
->stop_pc
+= increment_pc
;
2717 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
);
2719 if (!(*the_low_target
.breakpoint_at
) (event_child
->stop_pc
))
2720 event_child
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
2724 /* If this event was not handled before, and is not a SIGTRAP, we
2725 report it. SIGILL and SIGSEGV are also treated as traps in case
2726 a breakpoint is inserted at the current PC. If this target does
2727 not support internal breakpoints at all, we also report the
2728 SIGTRAP without further processing; it's of no concern to us. */
2730 = (supports_breakpoints ()
2731 && (WSTOPSIG (w
) == SIGTRAP
2732 || ((WSTOPSIG (w
) == SIGILL
2733 || WSTOPSIG (w
) == SIGSEGV
)
2734 && (*the_low_target
.breakpoint_at
) (event_child
->stop_pc
))));
2736 if (maybe_internal_trap
)
2738 /* Handle anything that requires bookkeeping before deciding to
2739 report the event or continue waiting. */
2741 /* First check if we can explain the SIGTRAP with an internal
2742 breakpoint, or if we should possibly report the event to GDB.
2743 Do this before anything that may remove or insert a
2745 bp_explains_trap
= breakpoint_inserted_here (event_child
->stop_pc
);
2747 /* We have a SIGTRAP, possibly a step-over dance has just
2748 finished. If so, tweak the state machine accordingly,
2749 reinsert breakpoints and delete any reinsert (software
2750 single-step) breakpoints. */
2751 step_over_finished
= finish_step_over (event_child
);
2753 /* Now invoke the callbacks of any internal breakpoints there. */
2754 check_breakpoints (event_child
->stop_pc
);
2756 /* Handle tracepoint data collecting. This may overflow the
2757 trace buffer, and cause a tracing stop, removing
2759 trace_event
= handle_tracepoints (event_child
);
2761 if (bp_explains_trap
)
2763 /* If we stepped or ran into an internal breakpoint, we've
2764 already handled it. So next time we resume (from this
2765 PC), we should step over it. */
2767 debug_printf ("Hit a gdbserver breakpoint.\n");
2769 if (breakpoint_here (event_child
->stop_pc
))
2770 event_child
->need_step_over
= 1;
2775 /* We have some other signal, possibly a step-over dance was in
2776 progress, and it should be cancelled too. */
2777 step_over_finished
= finish_step_over (event_child
);
2780 /* We have all the data we need. Either report the event to GDB, or
2781 resume threads and keep waiting for more. */
2783 /* If we're collecting a fast tracepoint, finish the collection and
2784 move out of the jump pad before delivering a signal. See
2785 linux_stabilize_threads. */
2788 && WSTOPSIG (w
) != SIGTRAP
2789 && supports_fast_tracepoints ()
2790 && agent_loaded_p ())
2793 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2794 "to defer or adjust it.\n",
2795 WSTOPSIG (w
), lwpid_of (current_thread
));
2797 /* Allow debugging the jump pad itself. */
2798 if (current_thread
->last_resume_kind
!= resume_step
2799 && maybe_move_out_of_jump_pad (event_child
, &w
))
2801 enqueue_one_deferred_signal (event_child
, &w
);
2804 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2805 WSTOPSIG (w
), lwpid_of (current_thread
));
2807 linux_resume_one_lwp (event_child
, 0, 0, NULL
);
2809 return ignore_event (ourstatus
);
2813 if (event_child
->collecting_fast_tracepoint
)
2816 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2817 "Check if we're already there.\n",
2818 lwpid_of (current_thread
),
2819 event_child
->collecting_fast_tracepoint
);
2823 event_child
->collecting_fast_tracepoint
2824 = linux_fast_tracepoint_collecting (event_child
, NULL
);
2826 if (event_child
->collecting_fast_tracepoint
!= 1)
2828 /* No longer need this breakpoint. */
2829 if (event_child
->exit_jump_pad_bkpt
!= NULL
)
2832 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2833 "stopping all threads momentarily.\n");
2835 /* Other running threads could hit this breakpoint.
2836 We don't handle moribund locations like GDB does,
2837 instead we always pause all threads when removing
2838 breakpoints, so that any step-over or
2839 decr_pc_after_break adjustment is always taken
2840 care of while the breakpoint is still
2842 stop_all_lwps (1, event_child
);
2844 delete_breakpoint (event_child
->exit_jump_pad_bkpt
);
2845 event_child
->exit_jump_pad_bkpt
= NULL
;
2847 unstop_all_lwps (1, event_child
);
2849 gdb_assert (event_child
->suspended
>= 0);
2853 if (event_child
->collecting_fast_tracepoint
== 0)
2856 debug_printf ("fast tracepoint finished "
2857 "collecting successfully.\n");
2859 /* We may have a deferred signal to report. */
2860 if (dequeue_one_deferred_signal (event_child
, &w
))
2863 debug_printf ("dequeued one signal.\n");
2868 debug_printf ("no deferred signals.\n");
2870 if (stabilizing_threads
)
2872 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
2873 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
2877 debug_printf ("linux_wait_1 ret = %s, stopped "
2878 "while stabilizing threads\n",
2879 target_pid_to_str (ptid_of (current_thread
)));
2883 return ptid_of (current_thread
);
2889 /* Check whether GDB would be interested in this event. */
2891 /* If GDB is not interested in this signal, don't stop other
2892 threads, and don't report it to GDB. Just resume the inferior
2893 right away. We do this for threading-related signals as well as
2894 any that GDB specifically requested we ignore. But never ignore
2895 SIGSTOP if we sent it ourselves, and do not ignore signals when
2896 stepping - they may require special handling to skip the signal
2897 handler. Also never ignore signals that could be caused by a
2899 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2902 && current_thread
->last_resume_kind
!= resume_step
2904 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2905 (current_process ()->priv
->thread_db
!= NULL
2906 && (WSTOPSIG (w
) == __SIGRTMIN
2907 || WSTOPSIG (w
) == __SIGRTMIN
+ 1))
2910 (pass_signals
[gdb_signal_from_host (WSTOPSIG (w
))]
2911 && !(WSTOPSIG (w
) == SIGSTOP
2912 && current_thread
->last_resume_kind
== resume_stop
)
2913 && !linux_wstatus_maybe_breakpoint (w
))))
2915 siginfo_t info
, *info_p
;
2918 debug_printf ("Ignored signal %d for LWP %ld.\n",
2919 WSTOPSIG (w
), lwpid_of (current_thread
));
2921 if (ptrace (PTRACE_GETSIGINFO
, lwpid_of (current_thread
),
2922 (PTRACE_TYPE_ARG3
) 0, &info
) == 0)
2926 linux_resume_one_lwp (event_child
, event_child
->stepping
,
2927 WSTOPSIG (w
), info_p
);
2928 return ignore_event (ourstatus
);
2931 /* Note that all addresses are always "out of the step range" when
2932 there's no range to begin with. */
2933 in_step_range
= lwp_in_step_range (event_child
);
2935 /* If GDB wanted this thread to single step, and the thread is out
2936 of the step range, we always want to report the SIGTRAP, and let
2937 GDB handle it. Watchpoints should always be reported. So should
2938 signals we can't explain. A SIGTRAP we can't explain could be a
2939 GDB breakpoint --- we may or not support Z0 breakpoints. If we
2940 do, we're be able to handle GDB breakpoints on top of internal
2941 breakpoints, by handling the internal breakpoint and still
2942 reporting the event to GDB. If we don't, we're out of luck, GDB
2943 won't see the breakpoint hit. */
2944 report_to_gdb
= (!maybe_internal_trap
2945 || (current_thread
->last_resume_kind
== resume_step
2947 || event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
2948 || (!step_over_finished
&& !in_step_range
2949 && !bp_explains_trap
&& !trace_event
)
2950 || (gdb_breakpoint_here (event_child
->stop_pc
)
2951 && gdb_condition_true_at_breakpoint (event_child
->stop_pc
)
2952 && gdb_no_commands_at_breakpoint (event_child
->stop_pc
)));
2954 run_breakpoint_commands (event_child
->stop_pc
);
2956 /* We found no reason GDB would want us to stop. We either hit one
2957 of our own breakpoints, or finished an internal step GDB
2958 shouldn't know about. */
2963 if (bp_explains_trap
)
2964 debug_printf ("Hit a gdbserver breakpoint.\n");
2965 if (step_over_finished
)
2966 debug_printf ("Step-over finished.\n");
2968 debug_printf ("Tracepoint event.\n");
2969 if (lwp_in_step_range (event_child
))
2970 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2971 paddress (event_child
->stop_pc
),
2972 paddress (event_child
->step_range_start
),
2973 paddress (event_child
->step_range_end
));
2976 /* We're not reporting this breakpoint to GDB, so apply the
2977 decr_pc_after_break adjustment to the inferior's regcache
2980 if (the_low_target
.set_pc
!= NULL
)
2982 struct regcache
*regcache
2983 = get_thread_regcache (current_thread
, 1);
2984 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
);
2987 /* We may have finished stepping over a breakpoint. If so,
2988 we've stopped and suspended all LWPs momentarily except the
2989 stepping one. This is where we resume them all again. We're
2990 going to keep waiting, so use proceed, which handles stepping
2991 over the next breakpoint. */
2993 debug_printf ("proceeding all threads.\n");
2995 if (step_over_finished
)
2996 unsuspend_all_lwps (event_child
);
2998 proceed_all_lwps ();
2999 return ignore_event (ourstatus
);
3004 if (current_thread
->last_resume_kind
== resume_step
)
3006 if (event_child
->step_range_start
== event_child
->step_range_end
)
3007 debug_printf ("GDB wanted to single-step, reporting event.\n");
3008 else if (!lwp_in_step_range (event_child
))
3009 debug_printf ("Out of step range, reporting event.\n");
3011 if (event_child
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
)
3012 debug_printf ("Stopped by watchpoint.\n");
3013 else if (gdb_breakpoint_here (event_child
->stop_pc
))
3014 debug_printf ("Stopped by GDB breakpoint.\n");
3016 debug_printf ("Hit a non-gdbserver trap event.\n");
3019 /* Alright, we're going to report a stop. */
3021 if (!stabilizing_threads
)
3023 /* In all-stop, stop all threads. */
3025 stop_all_lwps (0, NULL
);
3027 /* If we're not waiting for a specific LWP, choose an event LWP
3028 from among those that have had events. Giving equal priority
3029 to all LWPs that have had events helps prevent
3031 if (ptid_equal (ptid
, minus_one_ptid
))
3033 event_child
->status_pending_p
= 1;
3034 event_child
->status_pending
= w
;
3036 select_event_lwp (&event_child
);
3038 /* current_thread and event_child must stay in sync. */
3039 current_thread
= get_lwp_thread (event_child
);
3041 event_child
->status_pending_p
= 0;
3042 w
= event_child
->status_pending
;
3045 if (step_over_finished
)
3049 /* If we were doing a step-over, all other threads but
3050 the stepping one had been paused in start_step_over,
3051 with their suspend counts incremented. We don't want
3052 to do a full unstop/unpause, because we're in
3053 all-stop mode (so we want threads stopped), but we
3054 still need to unsuspend the other threads, to
3055 decrement their `suspended' count back. */
3056 unsuspend_all_lwps (event_child
);
3060 /* If we just finished a step-over, then all threads had
3061 been momentarily paused. In all-stop, that's fine,
3062 we want threads stopped by now anyway. In non-stop,
3063 we need to re-resume threads that GDB wanted to be
3065 unstop_all_lwps (1, event_child
);
3069 /* Stabilize threads (move out of jump pads). */
3071 stabilize_threads ();
3075 /* If we just finished a step-over, then all threads had been
3076 momentarily paused. In all-stop, that's fine, we want
3077 threads stopped by now anyway. In non-stop, we need to
3078 re-resume threads that GDB wanted to be running. */
3079 if (step_over_finished
)
3080 unstop_all_lwps (1, event_child
);
3083 ourstatus
->kind
= TARGET_WAITKIND_STOPPED
;
3085 /* Now that we've selected our final event LWP, un-adjust its PC if
3086 it was a software breakpoint, and the client doesn't know we can
3087 adjust the breakpoint ourselves. */
3088 if (event_child
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3089 && !swbreak_feature
)
3091 int decr_pc
= the_low_target
.decr_pc_after_break
;
3095 struct regcache
*regcache
3096 = get_thread_regcache (current_thread
, 1);
3097 (*the_low_target
.set_pc
) (regcache
, event_child
->stop_pc
+ decr_pc
);
3101 if (current_thread
->last_resume_kind
== resume_stop
3102 && WSTOPSIG (w
) == SIGSTOP
)
3104 /* A thread that has been requested to stop by GDB with vCont;t,
3105 and it stopped cleanly, so report as SIG0. The use of
3106 SIGSTOP is an implementation detail. */
3107 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
3109 else if (current_thread
->last_resume_kind
== resume_stop
3110 && WSTOPSIG (w
) != SIGSTOP
)
3112 /* A thread that has been requested to stop by GDB with vCont;t,
3113 but, it stopped for other reasons. */
3114 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
3118 ourstatus
->value
.sig
= gdb_signal_from_host (WSTOPSIG (w
));
3121 gdb_assert (ptid_equal (step_over_bkpt
, null_ptid
));
3125 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3126 target_pid_to_str (ptid_of (current_thread
)),
3127 ourstatus
->kind
, ourstatus
->value
.sig
);
3131 return ptid_of (current_thread
);
3134 /* Get rid of any pending event in the pipe. */
3136 async_file_flush (void)
3142 ret
= read (linux_event_pipe
[0], &buf
, 1);
3143 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
3146 /* Put something in the pipe, so the event loop wakes up. */
3148 async_file_mark (void)
3152 async_file_flush ();
3155 ret
= write (linux_event_pipe
[1], "+", 1);
3156 while (ret
== 0 || (ret
== -1 && errno
== EINTR
));
3158 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3159 be awakened anyway. */
3163 linux_wait (ptid_t ptid
,
3164 struct target_waitstatus
*ourstatus
, int target_options
)
3168 /* Flush the async file first. */
3169 if (target_is_async_p ())
3170 async_file_flush ();
3174 event_ptid
= linux_wait_1 (ptid
, ourstatus
, target_options
);
3176 while ((target_options
& TARGET_WNOHANG
) == 0
3177 && ptid_equal (event_ptid
, null_ptid
)
3178 && ourstatus
->kind
== TARGET_WAITKIND_IGNORE
);
3180 /* If at least one stop was reported, there may be more. A single
3181 SIGCHLD can signal more than one child stop. */
3182 if (target_is_async_p ()
3183 && (target_options
& TARGET_WNOHANG
) != 0
3184 && !ptid_equal (event_ptid
, null_ptid
))
/* Send a signal to an LWP.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef __NR_tkill
  {
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return ret;
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
/* Request LWP to stop by queueing a SIGSTOP for it.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
3225 send_sigstop (struct lwp_info
*lwp
)
3229 pid
= lwpid_of (get_lwp_thread (lwp
));
3231 /* If we already have a pending stop signal for this process, don't
3233 if (lwp
->stop_expected
)
3236 debug_printf ("Have pending sigstop for lwp %d\n", pid
);
3242 debug_printf ("Sending sigstop to lwp %d\n", pid
);
3244 lwp
->stop_expected
= 1;
3245 kill_lwp (pid
, SIGSTOP
);
3249 send_sigstop_callback (struct inferior_list_entry
*entry
, void *except
)
3251 struct thread_info
*thread
= (struct thread_info
*) entry
;
3252 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3254 /* Ignore EXCEPT. */
3265 /* Increment the suspend count of an LWP, and stop it, if not stopped
3268 suspend_and_send_sigstop_callback (struct inferior_list_entry
*entry
,
3271 struct thread_info
*thread
= (struct thread_info
*) entry
;
3272 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3274 /* Ignore EXCEPT. */
3280 return send_sigstop_callback (entry
, except
);
3284 mark_lwp_dead (struct lwp_info
*lwp
, int wstat
)
3286 /* It's dead, really. */
3289 /* Store the exit status for later. */
3290 lwp
->status_pending_p
= 1;
3291 lwp
->status_pending
= wstat
;
3293 /* Prevent trying to stop it. */
3296 /* No further stops are expected from a dead lwp. */
3297 lwp
->stop_expected
= 0;
3300 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3303 wait_for_sigstop (void)
3305 struct thread_info
*saved_thread
;
3310 saved_thread
= current_thread
;
3311 if (saved_thread
!= NULL
)
3312 saved_tid
= saved_thread
->entry
.id
;
3314 saved_tid
= null_ptid
; /* avoid bogus unused warning */
3317 debug_printf ("wait_for_sigstop: pulling events\n");
3319 /* Passing NULL_PTID as filter indicates we want all events to be
3320 left pending. Eventually this returns when there are no
3321 unwaited-for children left. */
3322 ret
= linux_wait_for_event_filtered (minus_one_ptid
, null_ptid
,
3324 gdb_assert (ret
== -1);
3326 if (saved_thread
== NULL
|| linux_thread_alive (saved_tid
))
3327 current_thread
= saved_thread
;
3331 debug_printf ("Previously current thread died.\n");
3335 /* We can't change the current inferior behind GDB's back,
3336 otherwise, a subsequent command may apply to the wrong
3338 current_thread
= NULL
;
3342 /* Set a valid thread as current. */
3343 set_desired_thread (0);
3348 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3349 move it out, because we need to report the stop event to GDB. For
3350 example, if the user puts a breakpoint in the jump pad, it's
3351 because she wants to debug it. */
3354 stuck_in_jump_pad_callback (struct inferior_list_entry
*entry
, void *data
)
3356 struct thread_info
*thread
= (struct thread_info
*) entry
;
3357 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3359 gdb_assert (lwp
->suspended
== 0);
3360 gdb_assert (lwp
->stopped
);
3362 /* Allow debugging the jump pad, gdb_collect, etc.. */
3363 return (supports_fast_tracepoints ()
3364 && agent_loaded_p ()
3365 && (gdb_breakpoint_here (lwp
->stop_pc
)
3366 || lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
3367 || thread
->last_resume_kind
== resume_step
)
3368 && linux_fast_tracepoint_collecting (lwp
, NULL
));
3372 move_out_of_jump_pad_callback (struct inferior_list_entry
*entry
)
3374 struct thread_info
*thread
= (struct thread_info
*) entry
;
3375 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3378 gdb_assert (lwp
->suspended
== 0);
3379 gdb_assert (lwp
->stopped
);
3381 wstat
= lwp
->status_pending_p
? &lwp
->status_pending
: NULL
;
3383 /* Allow debugging the jump pad, gdb_collect, etc. */
3384 if (!gdb_breakpoint_here (lwp
->stop_pc
)
3385 && lwp
->stop_reason
!= TARGET_STOPPED_BY_WATCHPOINT
3386 && thread
->last_resume_kind
!= resume_step
3387 && maybe_move_out_of_jump_pad (lwp
, wstat
))
3390 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3395 lwp
->status_pending_p
= 0;
3396 enqueue_one_deferred_signal (lwp
, wstat
);
3399 debug_printf ("Signal %d for LWP %ld deferred "
3401 WSTOPSIG (*wstat
), lwpid_of (thread
));
3404 linux_resume_one_lwp (lwp
, 0, 0, NULL
);
3411 lwp_running (struct inferior_list_entry
*entry
, void *data
)
3413 struct thread_info
*thread
= (struct thread_info
*) entry
;
3414 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3423 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3424 If SUSPEND, then also increase the suspend count of every LWP,
3428 stop_all_lwps (int suspend
, struct lwp_info
*except
)
3430 /* Should not be called recursively. */
3431 gdb_assert (stopping_threads
== NOT_STOPPING_THREADS
);
3436 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3437 suspend
? "stop-and-suspend" : "stop",
3439 ? target_pid_to_str (ptid_of (get_lwp_thread (except
)))
3443 stopping_threads
= (suspend
3444 ? STOPPING_AND_SUSPENDING_THREADS
3445 : STOPPING_THREADS
);
3448 find_inferior (&all_threads
, suspend_and_send_sigstop_callback
, except
);
3450 find_inferior (&all_threads
, send_sigstop_callback
, except
);
3451 wait_for_sigstop ();
3452 stopping_threads
= NOT_STOPPING_THREADS
;
3456 debug_printf ("stop_all_lwps done, setting stopping_threads "
3457 "back to !stopping\n");
3462 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3463 SIGNAL is nonzero, give it that signal. */
3466 linux_resume_one_lwp_throw (struct lwp_info
*lwp
,
3467 int step
, int signal
, siginfo_t
*info
)
3469 struct thread_info
*thread
= get_lwp_thread (lwp
);
3470 struct thread_info
*saved_thread
;
3471 int fast_tp_collecting
;
3473 if (lwp
->stopped
== 0)
3476 fast_tp_collecting
= lwp
->collecting_fast_tracepoint
;
3478 gdb_assert (!stabilizing_threads
|| fast_tp_collecting
);
3480 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3481 user used the "jump" command, or "set $pc = foo"). */
3482 if (lwp
->stop_pc
!= get_pc (lwp
))
3484 /* Collecting 'while-stepping' actions doesn't make sense
3486 release_while_stepping_state_list (thread
);
3489 /* If we have pending signals or status, and a new signal, enqueue the
3490 signal. Also enqueue the signal if we are waiting to reinsert a
3491 breakpoint; it will be picked up again below. */
3493 && (lwp
->status_pending_p
3494 || lwp
->pending_signals
!= NULL
3495 || lwp
->bp_reinsert
!= 0
3496 || fast_tp_collecting
))
3498 struct pending_signals
*p_sig
;
3499 p_sig
= xmalloc (sizeof (*p_sig
));
3500 p_sig
->prev
= lwp
->pending_signals
;
3501 p_sig
->signal
= signal
;
3503 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
3505 memcpy (&p_sig
->info
, info
, sizeof (siginfo_t
));
3506 lwp
->pending_signals
= p_sig
;
3509 if (lwp
->status_pending_p
)
3512 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3513 " has pending status\n",
3514 lwpid_of (thread
), step
? "step" : "continue", signal
,
3515 lwp
->stop_expected
? "expected" : "not expected");
3519 saved_thread
= current_thread
;
3520 current_thread
= thread
;
3523 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3524 lwpid_of (thread
), step
? "step" : "continue", signal
,
3525 lwp
->stop_expected
? "expected" : "not expected");
3527 /* This bit needs some thinking about. If we get a signal that
3528 we must report while a single-step reinsert is still pending,
3529 we often end up resuming the thread. It might be better to
3530 (ew) allow a stack of pending events; then we could be sure that
3531 the reinsert happened right away and not lose any signals.
3533 Making this stack would also shrink the window in which breakpoints are
3534 uninserted (see comment in linux_wait_for_lwp) but not enough for
3535 complete correctness, so it won't solve that problem. It may be
3536 worthwhile just to solve this one, however. */
3537 if (lwp
->bp_reinsert
!= 0)
3540 debug_printf (" pending reinsert at 0x%s\n",
3541 paddress (lwp
->bp_reinsert
));
3543 if (can_hardware_single_step ())
3545 if (fast_tp_collecting
== 0)
3548 fprintf (stderr
, "BAD - reinserting but not stepping.\n");
3550 fprintf (stderr
, "BAD - reinserting and suspended(%d).\n",
3557 /* Postpone any pending signal. It was enqueued above. */
3561 if (fast_tp_collecting
== 1)
3564 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3565 " (exit-jump-pad-bkpt)\n",
3568 /* Postpone any pending signal. It was enqueued above. */
3571 else if (fast_tp_collecting
== 2)
3574 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3575 " single-stepping\n",
3578 if (can_hardware_single_step ())
3582 internal_error (__FILE__
, __LINE__
,
3583 "moving out of jump pad single-stepping"
3584 " not implemented on this target");
3587 /* Postpone any pending signal. It was enqueued above. */
3591 /* If we have while-stepping actions in this thread set it stepping.
3592 If we have a signal to deliver, it may or may not be set to
3593 SIG_IGN, we don't know. Assume so, and allow collecting
3594 while-stepping into a signal handler. A possible smart thing to
3595 do would be to set an internal breakpoint at the signal return
3596 address, continue, and carry on catching this while-stepping
3597 action only when that breakpoint is hit. A future
3599 if (thread
->while_stepping
!= NULL
3600 && can_hardware_single_step ())
3603 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3608 if (the_low_target
.get_pc
!= NULL
)
3610 struct regcache
*regcache
= get_thread_regcache (current_thread
, 1);
3612 lwp
->stop_pc
= (*the_low_target
.get_pc
) (regcache
);
3616 debug_printf (" %s from pc 0x%lx\n", step
? "step" : "continue",
3617 (long) lwp
->stop_pc
);
3621 /* If we have pending signals, consume one unless we are trying to
3622 reinsert a breakpoint or we're trying to finish a fast tracepoint
3624 if (lwp
->pending_signals
!= NULL
3625 && lwp
->bp_reinsert
== 0
3626 && fast_tp_collecting
== 0)
3628 struct pending_signals
**p_sig
;
3630 p_sig
= &lwp
->pending_signals
;
3631 while ((*p_sig
)->prev
!= NULL
)
3632 p_sig
= &(*p_sig
)->prev
;
3634 signal
= (*p_sig
)->signal
;
3635 if ((*p_sig
)->info
.si_signo
!= 0)
3636 ptrace (PTRACE_SETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
3643 if (the_low_target
.prepare_to_resume
!= NULL
)
3644 the_low_target
.prepare_to_resume (lwp
);
3646 regcache_invalidate_thread (thread
);
3648 lwp
->stepping
= step
;
3649 ptrace (step
? PTRACE_SINGLESTEP
: PTRACE_CONT
, lwpid_of (thread
),
3650 (PTRACE_TYPE_ARG3
) 0,
3651 /* Coerce to a uintptr_t first to avoid potential gcc warning
3652 of coercing an 8 byte integer to a 4 byte pointer. */
3653 (PTRACE_TYPE_ARG4
) (uintptr_t) signal
);
3655 current_thread
= saved_thread
;
3657 perror_with_name ("resuming thread");
3659 /* Successfully resumed. Clear state that no longer makes sense,
3660 and mark the LWP as running. Must not do this before resuming
3661 otherwise if that fails other code will be confused. E.g., we'd
3662 later try to stop the LWP and hang forever waiting for a stop
3663 status. Note that we must not throw after this is cleared,
3664 otherwise handle_zombie_lwp_error would get confused. */
3666 lwp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
3669 /* Called when we try to resume a stopped LWP and that errors out. If
3670 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3671 or about to become), discard the error, clear any pending status
3672 the LWP may have, and return true (we'll collect the exit status
3673 soon enough). Otherwise, return false. */
3676 check_ptrace_stopped_lwp_gone (struct lwp_info
*lp
)
3678 struct thread_info
*thread
= get_lwp_thread (lp
);
3680 /* If we get an error after resuming the LWP successfully, we'd
3681 confuse !T state for the LWP being gone. */
3682 gdb_assert (lp
->stopped
);
3684 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3685 because even if ptrace failed with ESRCH, the tracee may be "not
3686 yet fully dead", but already refusing ptrace requests. In that
3687 case the tracee has 'R (Running)' state for a little bit
3688 (observed in Linux 3.18). See also the note on ESRCH in the
3689 ptrace(2) man page. Instead, check whether the LWP has any state
3690 other than ptrace-stopped. */
3692 /* Don't assume anything if /proc/PID/status can't be read. */
3693 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread
)) == 0)
3695 lp
->stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
3696 lp
->status_pending_p
= 0;
3702 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3703 disappears while we try to resume it. */
3706 linux_resume_one_lwp (struct lwp_info
*lwp
,
3707 int step
, int signal
, siginfo_t
*info
)
3711 linux_resume_one_lwp_throw (lwp
, step
, signal
, info
);
3713 CATCH (ex
, RETURN_MASK_ERROR
)
3715 if (!check_ptrace_stopped_lwp_gone (lwp
))
3716 throw_exception (ex
);
/* Argument bundle passed through find_inferior to
   linux_set_resume_request: the client's resume requests plus their
   count.  */

struct thread_resume_array
{
  struct thread_resume *resume;	/* Array of resume requests.  */
  size_t n;			/* Number of entries in RESUME.  */
};
3727 /* This function is called once per thread via find_inferior.
3728 ARG is a pointer to a thread_resume_array struct.
3729 We look up the thread specified by ENTRY in ARG, and mark the thread
3730 with a pointer to the appropriate resume request.
3732 This algorithm is O(threads * resume elements), but resume elements
3733 is small (and will remain small at least until GDB supports thread
3737 linux_set_resume_request (struct inferior_list_entry
*entry
, void *arg
)
3739 struct thread_info
*thread
= (struct thread_info
*) entry
;
3740 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3742 struct thread_resume_array
*r
;
3746 for (ndx
= 0; ndx
< r
->n
; ndx
++)
3748 ptid_t ptid
= r
->resume
[ndx
].thread
;
3749 if (ptid_equal (ptid
, minus_one_ptid
)
3750 || ptid_equal (ptid
, entry
->id
)
3751 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3753 || (ptid_get_pid (ptid
) == pid_of (thread
)
3754 && (ptid_is_pid (ptid
)
3755 || ptid_get_lwp (ptid
) == -1)))
3757 if (r
->resume
[ndx
].kind
== resume_stop
3758 && thread
->last_resume_kind
== resume_stop
)
3761 debug_printf ("already %s LWP %ld at GDB's request\n",
3762 (thread
->last_status
.kind
3763 == TARGET_WAITKIND_STOPPED
)
3771 lwp
->resume
= &r
->resume
[ndx
];
3772 thread
->last_resume_kind
= lwp
->resume
->kind
;
3774 lwp
->step_range_start
= lwp
->resume
->step_range_start
;
3775 lwp
->step_range_end
= lwp
->resume
->step_range_end
;
3777 /* If we had a deferred signal to report, dequeue one now.
3778 This can happen if LWP gets more than one signal while
3779 trying to get out of a jump pad. */
3781 && !lwp
->status_pending_p
3782 && dequeue_one_deferred_signal (lwp
, &lwp
->status_pending
))
3784 lwp
->status_pending_p
= 1;
3787 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3788 "leaving status pending.\n",
3789 WSTOPSIG (lwp
->status_pending
),
3797 /* No resume action for this thread. */
3803 /* find_inferior callback for linux_resume.
3804 Set *FLAG_P if this lwp has an interesting status pending. */
3807 resume_status_pending_p (struct inferior_list_entry
*entry
, void *flag_p
)
3809 struct thread_info
*thread
= (struct thread_info
*) entry
;
3810 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3812 /* LWPs which will not be resumed are not interesting, because
3813 we might not wait for them next time through linux_wait. */
3814 if (lwp
->resume
== NULL
)
3817 if (thread_still_has_status_pending_p (thread
))
3818 * (int *) flag_p
= 1;
3823 /* Return 1 if this lwp that GDB wants running is stopped at an
3824 internal breakpoint that we need to step over. It assumes that any
3825 required STOP_PC adjustment has already been propagated to the
3826 inferior's regcache. */
3829 need_step_over_p (struct inferior_list_entry
*entry
, void *dummy
)
3831 struct thread_info
*thread
= (struct thread_info
*) entry
;
3832 struct lwp_info
*lwp
= get_thread_lwp (thread
);
3833 struct thread_info
*saved_thread
;
3836 /* LWPs which will not be resumed are not interesting, because we
3837 might not wait for them next time through linux_wait. */
3842 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3847 if (thread
->last_resume_kind
== resume_stop
)
3850 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3856 gdb_assert (lwp
->suspended
>= 0);
3861 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3866 if (!lwp
->need_step_over
)
3869 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread
));
3872 if (lwp
->status_pending_p
)
3875 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3881 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3885 /* If the PC has changed since we stopped, then don't do anything,
3886 and let the breakpoint/tracepoint be hit. This happens if, for
3887 instance, GDB handled the decr_pc_after_break subtraction itself,
3888 GDB is OOL stepping this thread, or the user has issued a "jump"
3889 command, or poked thread's registers herself. */
3890 if (pc
!= lwp
->stop_pc
)
3893 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3894 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3896 paddress (lwp
->stop_pc
), paddress (pc
));
3898 lwp
->need_step_over
= 0;
3902 saved_thread
= current_thread
;
3903 current_thread
= thread
;
3905 /* We can only step over breakpoints we know about. */
3906 if (breakpoint_here (pc
) || fast_tracepoint_jump_here (pc
))
3908 /* Don't step over a breakpoint that GDB expects to hit
3909 though. If the condition is being evaluated on the target's side
3910 and it evaluate to false, step over this breakpoint as well. */
3911 if (gdb_breakpoint_here (pc
)
3912 && gdb_condition_true_at_breakpoint (pc
)
3913 && gdb_no_commands_at_breakpoint (pc
))
3916 debug_printf ("Need step over [LWP %ld]? yes, but found"
3917 " GDB breakpoint at 0x%s; skipping step over\n",
3918 lwpid_of (thread
), paddress (pc
));
3920 current_thread
= saved_thread
;
3926 debug_printf ("Need step over [LWP %ld]? yes, "
3927 "found breakpoint at 0x%s\n",
3928 lwpid_of (thread
), paddress (pc
));
3930 /* We've found an lwp that needs stepping over --- return 1 so
3931 that find_inferior stops looking. */
3932 current_thread
= saved_thread
;
3934 /* If the step over is cancelled, this is set again. */
3935 lwp
->need_step_over
= 0;
3940 current_thread
= saved_thread
;
3943 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3945 lwpid_of (thread
), paddress (pc
));
3950 /* Start a step-over operation on LWP. When LWP stopped at a
3951 breakpoint, to make progress, we need to remove the breakpoint out
3952 of the way. If we let other threads run while we do that, they may
3953 pass by the breakpoint location and miss hitting it. To avoid
3954 that, a step-over momentarily stops all threads while LWP is
3955 single-stepped while the breakpoint is temporarily uninserted from
3956 the inferior. When the single-step finishes, we reinsert the
3957 breakpoint, and let all threads that are supposed to be running,
3960 On targets that don't support hardware single-step, we don't
3961 currently support full software single-stepping. Instead, we only
3962 support stepping over the thread event breakpoint, by asking the
3963 low target where to place a reinsert breakpoint. Since this
3964 routine assumes the breakpoint being stepped over is a thread event
3965 breakpoint, it usually assumes the return address of the current
3966 function is a good enough place to set the reinsert breakpoint. */
3969 start_step_over (struct lwp_info
*lwp
)
3971 struct thread_info
*thread
= get_lwp_thread (lwp
);
3972 struct thread_info
*saved_thread
;
3977 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3980 stop_all_lwps (1, lwp
);
3981 gdb_assert (lwp
->suspended
== 0);
3984 debug_printf ("Done stopping all threads for step-over.\n");
3986 /* Note, we should always reach here with an already adjusted PC,
3987 either by GDB (if we're resuming due to GDB's request), or by our
3988 caller, if we just finished handling an internal breakpoint GDB
3989 shouldn't care about. */
3992 saved_thread
= current_thread
;
3993 current_thread
= thread
;
3995 lwp
->bp_reinsert
= pc
;
3996 uninsert_breakpoints_at (pc
);
3997 uninsert_fast_tracepoint_jumps_at (pc
);
3999 if (can_hardware_single_step ())
4005 CORE_ADDR raddr
= (*the_low_target
.breakpoint_reinsert_addr
) ();
4006 set_reinsert_breakpoint (raddr
);
4010 current_thread
= saved_thread
;
4012 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
4014 /* Require next event from this LWP. */
4015 step_over_bkpt
= thread
->entry
.id
;
4019 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4020 start_step_over, if still there, and delete any reinsert
4021 breakpoints we've set, on non hardware single-step targets. */
4024 finish_step_over (struct lwp_info
*lwp
)
4026 if (lwp
->bp_reinsert
!= 0)
4029 debug_printf ("Finished step over.\n");
4031 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4032 may be no breakpoint to reinsert there by now. */
4033 reinsert_breakpoints_at (lwp
->bp_reinsert
);
4034 reinsert_fast_tracepoint_jumps_at (lwp
->bp_reinsert
);
4036 lwp
->bp_reinsert
= 0;
4038 /* Delete any software-single-step reinsert breakpoints. No
4039 longer needed. We don't have to worry about other threads
4040 hitting this trap, and later not being able to explain it,
4041 because we were stepping over a breakpoint, and we hold all
4042 threads but LWP stopped while doing that. */
4043 if (!can_hardware_single_step ())
4044 delete_reinsert_breakpoints ();
4046 step_over_bkpt
= null_ptid
;
4053 /* This function is called once per thread. We check the thread's resume
4054 request, which will tell us whether to resume, step, or leave the thread
4055 stopped; and what signal, if any, it should be sent.
4057 For threads which we aren't explicitly told otherwise, we preserve
4058 the stepping flag; this is used for stepping over gdbserver-placed
4061 If pending_flags was set in any thread, we queue any needed
4062 signals, since we won't actually resume. We already have a pending
4063 event to report, so we don't need to preserve any step requests;
4064 they should be re-issued if necessary. */
4067 linux_resume_one_thread (struct inferior_list_entry
*entry
, void *arg
)
4069 struct thread_info
*thread
= (struct thread_info
*) entry
;
4070 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4072 int leave_all_stopped
= * (int *) arg
;
4075 if (lwp
->resume
== NULL
)
4078 if (lwp
->resume
->kind
== resume_stop
)
4081 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread
));
4086 debug_printf ("stopping LWP %ld\n", lwpid_of (thread
));
4088 /* Stop the thread, and wait for the event asynchronously,
4089 through the event loop. */
4095 debug_printf ("already stopped LWP %ld\n",
4098 /* The LWP may have been stopped in an internal event that
4099 was not meant to be notified back to GDB (e.g., gdbserver
4100 breakpoint), so we should be reporting a stop event in
4103 /* If the thread already has a pending SIGSTOP, this is a
4104 no-op. Otherwise, something later will presumably resume
4105 the thread and this will cause it to cancel any pending
4106 operation, due to last_resume_kind == resume_stop. If
4107 the thread already has a pending status to report, we
4108 will still report it the next time we wait - see
4109 status_pending_p_callback. */
4111 /* If we already have a pending signal to report, then
4112 there's no need to queue a SIGSTOP, as this means we're
4113 midway through moving the LWP out of the jumppad, and we
4114 will report the pending signal as soon as that is
4116 if (lwp
->pending_signals_to_report
== NULL
)
4120 /* For stop requests, we're done. */
4122 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
4126 /* If this thread which is about to be resumed has a pending status,
4127 then don't resume any threads - we can just report the pending
4128 status. Make sure to queue any signals that would otherwise be
4129 sent. In all-stop mode, we do this decision based on if *any*
4130 thread has a pending status. If there's a thread that needs the
4131 step-over-breakpoint dance, then don't resume any other thread
4132 but that particular one. */
4133 leave_pending
= (lwp
->status_pending_p
|| leave_all_stopped
);
4138 debug_printf ("resuming LWP %ld\n", lwpid_of (thread
));
4140 step
= (lwp
->resume
->kind
== resume_step
);
4141 linux_resume_one_lwp (lwp
, step
, lwp
->resume
->sig
, NULL
);
4146 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread
));
4148 /* If we have a new signal, enqueue the signal. */
4149 if (lwp
->resume
->sig
!= 0)
4151 struct pending_signals
*p_sig
;
4152 p_sig
= xmalloc (sizeof (*p_sig
));
4153 p_sig
->prev
= lwp
->pending_signals
;
4154 p_sig
->signal
= lwp
->resume
->sig
;
4155 memset (&p_sig
->info
, 0, sizeof (siginfo_t
));
4157 /* If this is the same signal we were previously stopped by,
4158 make sure to queue its siginfo. We can ignore the return
4159 value of ptrace; if it fails, we'll skip
4160 PTRACE_SETSIGINFO. */
4161 if (WIFSTOPPED (lwp
->last_status
)
4162 && WSTOPSIG (lwp
->last_status
) == lwp
->resume
->sig
)
4163 ptrace (PTRACE_GETSIGINFO
, lwpid_of (thread
), (PTRACE_TYPE_ARG3
) 0,
4166 lwp
->pending_signals
= p_sig
;
4170 thread
->last_status
.kind
= TARGET_WAITKIND_IGNORE
;
4176 linux_resume (struct thread_resume
*resume_info
, size_t n
)
4178 struct thread_resume_array array
= { resume_info
, n
};
4179 struct thread_info
*need_step_over
= NULL
;
4181 int leave_all_stopped
;
4186 debug_printf ("linux_resume:\n");
4189 find_inferior (&all_threads
, linux_set_resume_request
, &array
);
4191 /* If there is a thread which would otherwise be resumed, which has
4192 a pending status, then don't resume any threads - we can just
4193 report the pending status. Make sure to queue any signals that
4194 would otherwise be sent. In non-stop mode, we'll apply this
4195 logic to each thread individually. We consume all pending events
4196 before considering to start a step-over (in all-stop). */
4199 find_inferior (&all_threads
, resume_status_pending_p
, &any_pending
);
4201 /* If there is a thread which would otherwise be resumed, which is
4202 stopped at a breakpoint that needs stepping over, then don't
4203 resume any threads - have it step over the breakpoint with all
4204 other threads stopped, then resume all threads again. Make sure
4205 to queue any signals that would otherwise be delivered or
4207 if (!any_pending
&& supports_breakpoints ())
4209 = (struct thread_info
*) find_inferior (&all_threads
,
4210 need_step_over_p
, NULL
);
4212 leave_all_stopped
= (need_step_over
!= NULL
|| any_pending
);
4216 if (need_step_over
!= NULL
)
4217 debug_printf ("Not resuming all, need step over\n");
4218 else if (any_pending
)
4219 debug_printf ("Not resuming, all-stop and found "
4220 "an LWP with pending status\n");
4222 debug_printf ("Resuming, no pending status or step over needed\n");
4225 /* Even if we're leaving threads stopped, queue all signals we'd
4226 otherwise deliver. */
4227 find_inferior (&all_threads
, linux_resume_one_thread
, &leave_all_stopped
);
4230 start_step_over (get_thread_lwp (need_step_over
));
4234 debug_printf ("linux_resume done\n");
4239 /* This function is called once per thread. We check the thread's
4240 last resume request, which will tell us whether to resume, step, or
4241 leave the thread stopped. Any signal the client requested to be
4242 delivered has already been enqueued at this point.
4244 If any thread that GDB wants running is stopped at an internal
4245 breakpoint that needs stepping over, we start a step-over operation
4246 on that particular thread, and leave all others stopped. */
4249 proceed_one_lwp (struct inferior_list_entry
*entry
, void *except
)
4251 struct thread_info
*thread
= (struct thread_info
*) entry
;
4252 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4259 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread
));
4264 debug_printf (" LWP %ld already running\n", lwpid_of (thread
));
4268 if (thread
->last_resume_kind
== resume_stop
4269 && thread
->last_status
.kind
!= TARGET_WAITKIND_IGNORE
)
4272 debug_printf (" client wants LWP to remain %ld stopped\n",
4277 if (lwp
->status_pending_p
)
4280 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4285 gdb_assert (lwp
->suspended
>= 0);
4290 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread
));
4294 if (thread
->last_resume_kind
== resume_stop
4295 && lwp
->pending_signals_to_report
== NULL
4296 && lwp
->collecting_fast_tracepoint
== 0)
4298 /* We haven't reported this LWP as stopped yet (otherwise, the
4299 last_status.kind check above would catch it, and we wouldn't
4300 reach here. This LWP may have been momentarily paused by a
4301 stop_all_lwps call while handling for example, another LWP's
4302 step-over. In that case, the pending expected SIGSTOP signal
4303 that was queued at vCont;t handling time will have already
4304 been consumed by wait_for_sigstop, and so we need to requeue
4305 another one here. Note that if the LWP already has a SIGSTOP
4306 pending, this is a no-op. */
4309 debug_printf ("Client wants LWP %ld to stop. "
4310 "Making sure it has a SIGSTOP pending\n",
4316 step
= thread
->last_resume_kind
== resume_step
;
4317 linux_resume_one_lwp (lwp
, step
, 0, NULL
);
4322 unsuspend_and_proceed_one_lwp (struct inferior_list_entry
*entry
, void *except
)
4324 struct thread_info
*thread
= (struct thread_info
*) entry
;
4325 struct lwp_info
*lwp
= get_thread_lwp (thread
);
4331 gdb_assert (lwp
->suspended
>= 0);
4333 return proceed_one_lwp (entry
, except
);
4336 /* When we finish a step-over, set threads running again. If there's
4337 another thread that may need a step-over, now's the time to start
4338 it. Eventually, we'll move all threads past their breakpoints. */
4341 proceed_all_lwps (void)
4343 struct thread_info
*need_step_over
;
4345 /* If there is a thread which would otherwise be resumed, which is
4346 stopped at a breakpoint that needs stepping over, then don't
4347 resume any threads - have it step over the breakpoint with all
4348 other threads stopped, then resume all threads again. */
4350 if (supports_breakpoints ())
4353 = (struct thread_info
*) find_inferior (&all_threads
,
4354 need_step_over_p
, NULL
);
4356 if (need_step_over
!= NULL
)
4359 debug_printf ("proceed_all_lwps: found "
4360 "thread %ld needing a step-over\n",
4361 lwpid_of (need_step_over
));
4363 start_step_over (get_thread_lwp (need_step_over
));
4369 debug_printf ("Proceeding, no step-over needed\n");
4371 find_inferior (&all_threads
, proceed_one_lwp
, NULL
);
4374 /* Stopped LWPs that the client wanted to be running, that don't have
4375 pending statuses, are set to run again, except for EXCEPT, if not
4376 NULL. This undoes a stop_all_lwps call. */
4379 unstop_all_lwps (int unsuspend
, struct lwp_info
*except
)
4385 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4386 lwpid_of (get_lwp_thread (except
)));
4388 debug_printf ("unstopping all lwps\n");
4392 find_inferior (&all_threads
, unsuspend_and_proceed_one_lwp
, except
);
4394 find_inferior (&all_threads
, proceed_one_lwp
, except
);
4398 debug_printf ("unstop_all_lwps done\n");
4404 #ifdef HAVE_LINUX_REGSETS
4406 #define use_linux_regsets 1
4408 /* Returns true if REGSET has been disabled. */
4411 regset_disabled (struct regsets_info
*info
, struct regset_info
*regset
)
4413 return (info
->disabled_regsets
!= NULL
4414 && info
->disabled_regsets
[regset
- info
->regsets
]);
4417 /* Disable REGSET. */
4420 disable_regset (struct regsets_info
*info
, struct regset_info
*regset
)
4424 dr_offset
= regset
- info
->regsets
;
4425 if (info
->disabled_regsets
== NULL
)
4426 info
->disabled_regsets
= xcalloc (1, info
->num_regsets
);
4427 info
->disabled_regsets
[dr_offset
] = 1;
4431 regsets_fetch_inferior_registers (struct regsets_info
*regsets_info
,
4432 struct regcache
*regcache
)
4434 struct regset_info
*regset
;
4435 int saw_general_regs
= 0;
4439 pid
= lwpid_of (current_thread
);
4440 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
4445 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
))
4448 buf
= xmalloc (regset
->size
);
4450 nt_type
= regset
->nt_type
;
4454 iov
.iov_len
= regset
->size
;
4455 data
= (void *) &iov
;
4461 res
= ptrace (regset
->get_request
, pid
,
4462 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4464 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
4470 /* If we get EIO on a regset, do not try it again for
4471 this process mode. */
4472 disable_regset (regsets_info
, regset
);
4474 else if (errno
== ENODATA
)
4476 /* ENODATA may be returned if the regset is currently
4477 not "active". This can happen in normal operation,
4478 so suppress the warning in this case. */
4483 sprintf (s
, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4490 if (regset
->type
== GENERAL_REGS
)
4491 saw_general_regs
= 1;
4492 regset
->store_function (regcache
, buf
);
4496 if (saw_general_regs
)
4503 regsets_store_inferior_registers (struct regsets_info
*regsets_info
,
4504 struct regcache
*regcache
)
4506 struct regset_info
*regset
;
4507 int saw_general_regs
= 0;
4511 pid
= lwpid_of (current_thread
);
4512 for (regset
= regsets_info
->regsets
; regset
->size
>= 0; regset
++)
4517 if (regset
->size
== 0 || regset_disabled (regsets_info
, regset
)
4518 || regset
->fill_function
== NULL
)
4521 buf
= xmalloc (regset
->size
);
4523 /* First fill the buffer with the current register set contents,
4524 in case there are any items in the kernel's regset that are
4525 not in gdbserver's regcache. */
4527 nt_type
= regset
->nt_type
;
4531 iov
.iov_len
= regset
->size
;
4532 data
= (void *) &iov
;
4538 res
= ptrace (regset
->get_request
, pid
,
4539 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4541 res
= ptrace (regset
->get_request
, pid
, data
, nt_type
);
4546 /* Then overlay our cached registers on that. */
4547 regset
->fill_function (regcache
, buf
);
4549 /* Only now do we write the register set. */
4551 res
= ptrace (regset
->set_request
, pid
,
4552 (PTRACE_TYPE_ARG3
) (long) nt_type
, data
);
4554 res
= ptrace (regset
->set_request
, pid
, data
, nt_type
);
4562 /* If we get EIO on a regset, do not try it again for
4563 this process mode. */
4564 disable_regset (regsets_info
, regset
);
4566 else if (errno
== ESRCH
)
4568 /* At this point, ESRCH should mean the process is
4569 already gone, in which case we simply ignore attempts
4570 to change its registers. See also the related
4571 comment in linux_resume_one_lwp. */
4577 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4580 else if (regset
->type
== GENERAL_REGS
)
4581 saw_general_regs
= 1;
4584 if (saw_general_regs
)
4590 #else /* !HAVE_LINUX_REGSETS */
4592 #define use_linux_regsets 0
4593 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4594 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4598 /* Return 1 if register REGNO is supported by one of the regset ptrace
4599 calls or 0 if it has to be transferred individually. */
4602 linux_register_in_regsets (const struct regs_info
*regs_info
, int regno
)
4604 unsigned char mask
= 1 << (regno
% 8);
4605 size_t index
= regno
/ 8;
4607 return (use_linux_regsets
4608 && (regs_info
->regset_bitmap
== NULL
4609 || (regs_info
->regset_bitmap
[index
] & mask
) != 0));
4612 #ifdef HAVE_LINUX_USRREGS
4615 register_addr (const struct usrregs_info
*usrregs
, int regnum
)
4619 if (regnum
< 0 || regnum
>= usrregs
->num_regs
)
4620 error ("Invalid register number %d.", regnum
);
4622 addr
= usrregs
->regmap
[regnum
];
4627 /* Fetch one register. */
4629 fetch_register (const struct usrregs_info
*usrregs
,
4630 struct regcache
*regcache
, int regno
)
4637 if (regno
>= usrregs
->num_regs
)
4639 if ((*the_low_target
.cannot_fetch_register
) (regno
))
4642 regaddr
= register_addr (usrregs
, regno
);
4646 size
= ((register_size (regcache
->tdesc
, regno
)
4647 + sizeof (PTRACE_XFER_TYPE
) - 1)
4648 & -sizeof (PTRACE_XFER_TYPE
));
4649 buf
= alloca (size
);
4651 pid
= lwpid_of (current_thread
);
4652 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
4655 *(PTRACE_XFER_TYPE
*) (buf
+ i
) =
4656 ptrace (PTRACE_PEEKUSER
, pid
,
4657 /* Coerce to a uintptr_t first to avoid potential gcc warning
4658 of coercing an 8 byte integer to a 4 byte pointer. */
4659 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
, (PTRACE_TYPE_ARG4
) 0);
4660 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
4662 error ("reading register %d: %s", regno
, strerror (errno
));
4665 if (the_low_target
.supply_ptrace_register
)
4666 the_low_target
.supply_ptrace_register (regcache
, regno
, buf
);
4668 supply_register (regcache
, regno
, buf
);
4671 /* Store one register. */
4673 store_register (const struct usrregs_info
*usrregs
,
4674 struct regcache
*regcache
, int regno
)
4681 if (regno
>= usrregs
->num_regs
)
4683 if ((*the_low_target
.cannot_store_register
) (regno
))
4686 regaddr
= register_addr (usrregs
, regno
);
4690 size
= ((register_size (regcache
->tdesc
, regno
)
4691 + sizeof (PTRACE_XFER_TYPE
) - 1)
4692 & -sizeof (PTRACE_XFER_TYPE
));
4693 buf
= alloca (size
);
4694 memset (buf
, 0, size
);
4696 if (the_low_target
.collect_ptrace_register
)
4697 the_low_target
.collect_ptrace_register (regcache
, regno
, buf
);
4699 collect_register (regcache
, regno
, buf
);
4701 pid
= lwpid_of (current_thread
);
4702 for (i
= 0; i
< size
; i
+= sizeof (PTRACE_XFER_TYPE
))
4705 ptrace (PTRACE_POKEUSER
, pid
,
4706 /* Coerce to a uintptr_t first to avoid potential gcc warning
4707 about coercing an 8 byte integer to a 4 byte pointer. */
4708 (PTRACE_TYPE_ARG3
) (uintptr_t) regaddr
,
4709 (PTRACE_TYPE_ARG4
) *(PTRACE_XFER_TYPE
*) (buf
+ i
));
4712 /* At this point, ESRCH should mean the process is
4713 already gone, in which case we simply ignore attempts
4714 to change its registers. See also the related
4715 comment in linux_resume_one_lwp. */
4719 if ((*the_low_target
.cannot_store_register
) (regno
) == 0)
4720 error ("writing register %d: %s", regno
, strerror (errno
));
4722 regaddr
+= sizeof (PTRACE_XFER_TYPE
);
4726 /* Fetch all registers, or just one, from the child process.
4727 If REGNO is -1, do this for all registers, skipping any that are
4728 assumed to have been retrieved by regsets_fetch_inferior_registers,
4729 unless ALL is non-zero.
4730 Otherwise, REGNO specifies which register (so we can save time). */
4732 usr_fetch_inferior_registers (const struct regs_info
*regs_info
,
4733 struct regcache
*regcache
, int regno
, int all
)
4735 struct usrregs_info
*usr
= regs_info
->usrregs
;
4739 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
4740 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
4741 fetch_register (usr
, regcache
, regno
);
4744 fetch_register (usr
, regcache
, regno
);
4747 /* Store our register values back into the inferior.
4748 If REGNO is -1, do this for all registers, skipping any that are
4749 assumed to have been saved by regsets_store_inferior_registers,
4750 unless ALL is non-zero.
4751 Otherwise, REGNO specifies which register (so we can save time). */
4753 usr_store_inferior_registers (const struct regs_info
*regs_info
,
4754 struct regcache
*regcache
, int regno
, int all
)
4756 struct usrregs_info
*usr
= regs_info
->usrregs
;
4760 for (regno
= 0; regno
< usr
->num_regs
; regno
++)
4761 if (all
|| !linux_register_in_regsets (regs_info
, regno
))
4762 store_register (usr
, regcache
, regno
);
4765 store_register (usr
, regcache
, regno
);
4768 #else /* !HAVE_LINUX_USRREGS */
4770 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4771 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4777 linux_fetch_registers (struct regcache
*regcache
, int regno
)
4781 const struct regs_info
*regs_info
= (*the_low_target
.regs_info
) ();
4785 if (the_low_target
.fetch_register
!= NULL
4786 && regs_info
->usrregs
!= NULL
)
4787 for (regno
= 0; regno
< regs_info
->usrregs
->num_regs
; regno
++)
4788 (*the_low_target
.fetch_register
) (regcache
, regno
);
4790 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
, regcache
);
4791 if (regs_info
->usrregs
!= NULL
)
4792 usr_fetch_inferior_registers (regs_info
, regcache
, -1, all
);
4796 if (the_low_target
.fetch_register
!= NULL
4797 && (*the_low_target
.fetch_register
) (regcache
, regno
))
4800 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
4802 all
= regsets_fetch_inferior_registers (regs_info
->regsets_info
,
4804 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
4805 usr_fetch_inferior_registers (regs_info
, regcache
, regno
, 1);
4810 linux_store_registers (struct regcache
*regcache
, int regno
)
4814 const struct regs_info
*regs_info
= (*the_low_target
.regs_info
) ();
4818 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
4820 if (regs_info
->usrregs
!= NULL
)
4821 usr_store_inferior_registers (regs_info
, regcache
, regno
, all
);
4825 use_regsets
= linux_register_in_regsets (regs_info
, regno
);
4827 all
= regsets_store_inferior_registers (regs_info
->regsets_info
,
4829 if ((!use_regsets
|| all
) && regs_info
->usrregs
!= NULL
)
4830 usr_store_inferior_registers (regs_info
, regcache
, regno
, 1);
4835 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4836 to debugger memory starting at MYADDR. */
4839 linux_read_memory (CORE_ADDR memaddr
, unsigned char *myaddr
, int len
)
4841 int pid
= lwpid_of (current_thread
);
4842 register PTRACE_XFER_TYPE
*buffer
;
4843 register CORE_ADDR addr
;
4850 /* Try using /proc. Don't bother for one word. */
4851 if (len
>= 3 * sizeof (long))
4855 /* We could keep this file open and cache it - possibly one per
4856 thread. That requires some juggling, but is even faster. */
4857 sprintf (filename
, "/proc/%d/mem", pid
);
4858 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
4862 /* If pread64 is available, use it. It's faster if the kernel
4863 supports it (only one syscall), and it's 64-bit safe even on
4864 32-bit platforms (for instance, SPARC debugging a SPARC64
4867 bytes
= pread64 (fd
, myaddr
, len
, memaddr
);
4870 if (lseek (fd
, memaddr
, SEEK_SET
) != -1)
4871 bytes
= read (fd
, myaddr
, len
);
4878 /* Some data was read, we'll try to get the rest with ptrace. */
4888 /* Round starting address down to longword boundary. */
4889 addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
4890 /* Round ending address up; get number of longwords that makes. */
4891 count
= ((((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
4892 / sizeof (PTRACE_XFER_TYPE
));
4893 /* Allocate buffer of that many longwords. */
4894 buffer
= (PTRACE_XFER_TYPE
*) alloca (count
* sizeof (PTRACE_XFER_TYPE
));
4896 /* Read all the longwords */
4898 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
4900 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4901 about coercing an 8 byte integer to a 4 byte pointer. */
4902 buffer
[i
] = ptrace (PTRACE_PEEKTEXT
, pid
,
4903 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
4904 (PTRACE_TYPE_ARG4
) 0);
4910 /* Copy appropriate bytes out of the buffer. */
4913 i
*= sizeof (PTRACE_XFER_TYPE
);
4914 i
-= memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1);
4916 (char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
4923 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4924 memory at MEMADDR. On failure (cannot write to the inferior)
4925 returns the value of errno. Always succeeds if LEN is zero. */
4928 linux_write_memory (CORE_ADDR memaddr
, const unsigned char *myaddr
, int len
)
4931 /* Round starting address down to longword boundary. */
4932 register CORE_ADDR addr
= memaddr
& -(CORE_ADDR
) sizeof (PTRACE_XFER_TYPE
);
4933 /* Round ending address up; get number of longwords that makes. */
4935 = (((memaddr
+ len
) - addr
) + sizeof (PTRACE_XFER_TYPE
) - 1)
4936 / sizeof (PTRACE_XFER_TYPE
);
4938 /* Allocate buffer of that many longwords. */
4939 register PTRACE_XFER_TYPE
*buffer
= (PTRACE_XFER_TYPE
*)
4940 alloca (count
* sizeof (PTRACE_XFER_TYPE
));
4942 int pid
= lwpid_of (current_thread
);
4946 /* Zero length write always succeeds. */
4952 /* Dump up to four bytes. */
4953 unsigned int val
= * (unsigned int *) myaddr
;
4959 val
= val
& 0xffffff;
4960 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len
< 4) ? len
: 4),
4961 val
, (long)memaddr
);
4964 /* Fill start and end extra bytes of buffer with existing memory data. */
4967 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4968 about coercing an 8 byte integer to a 4 byte pointer. */
4969 buffer
[0] = ptrace (PTRACE_PEEKTEXT
, pid
,
4970 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
4971 (PTRACE_TYPE_ARG4
) 0);
4979 = ptrace (PTRACE_PEEKTEXT
, pid
,
4980 /* Coerce to a uintptr_t first to avoid potential gcc warning
4981 about coercing an 8 byte integer to a 4 byte pointer. */
4982 (PTRACE_TYPE_ARG3
) (uintptr_t) (addr
+ (count
- 1)
4983 * sizeof (PTRACE_XFER_TYPE
)),
4984 (PTRACE_TYPE_ARG4
) 0);
4989 /* Copy data to be written over corresponding part of buffer. */
4991 memcpy ((char *) buffer
+ (memaddr
& (sizeof (PTRACE_XFER_TYPE
) - 1)),
4994 /* Write the entire buffer. */
4996 for (i
= 0; i
< count
; i
++, addr
+= sizeof (PTRACE_XFER_TYPE
))
4999 ptrace (PTRACE_POKETEXT
, pid
,
5000 /* Coerce to a uintptr_t first to avoid potential gcc warning
5001 about coercing an 8 byte integer to a 4 byte pointer. */
5002 (PTRACE_TYPE_ARG3
) (uintptr_t) addr
,
5003 (PTRACE_TYPE_ARG4
) buffer
[i
]);
/* Hook called once symbols are available; initializes libthread_db
   support so gdbserver can see the inferior's threads.  No-op when
   thread_db support was compiled out, or when it is already set up
   for the current process.  */

static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->priv->thread_db != NULL)
    return;

  /* If the kernel supports tracing clones, then we don't need to
     use the magic thread event breakpoint to learn about
     threads.  */
  thread_db_init (!linux_supports_traceclone ());
#endif
}
/* Interrupt the inferior on behalf of GDB (e.g. the user hit ^C in
   the remote session).  */

static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  /* Send a SIGINT to the process group.  This acts just like the user
     typed a ^C on the controlling terminal.  */
  kill (-signal_pid, SIGINT);
}
5037 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5038 to debugger memory starting at MYADDR. */
5041 linux_read_auxv (CORE_ADDR offset
, unsigned char *myaddr
, unsigned int len
)
5043 char filename
[PATH_MAX
];
5045 int pid
= lwpid_of (current_thread
);
5047 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
5049 fd
= open (filename
, O_RDONLY
);
5053 if (offset
!= (CORE_ADDR
) 0
5054 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
5057 n
= read (fd
, myaddr
, len
);
5064 /* These breakpoint and watchpoint related wrapper functions simply
5065 pass on the function call if the target has registered a
5066 corresponding function. */
5069 linux_supports_z_point_type (char z_type
)
5071 return (the_low_target
.supports_z_point_type
!= NULL
5072 && the_low_target
.supports_z_point_type (z_type
));
5076 linux_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5077 int size
, struct raw_breakpoint
*bp
)
5079 if (the_low_target
.insert_point
!= NULL
)
5080 return the_low_target
.insert_point (type
, addr
, size
, bp
);
5082 /* Unsupported (see target.h). */
5087 linux_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
5088 int size
, struct raw_breakpoint
*bp
)
5090 if (the_low_target
.remove_point
!= NULL
)
5091 return the_low_target
.remove_point (type
, addr
, size
, bp
);
5093 /* Unsupported (see target.h). */
5097 /* Implement the to_stopped_by_sw_breakpoint target_ops
5101 linux_stopped_by_sw_breakpoint (void)
5103 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5105 return (lwp
->stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
);
5108 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5112 linux_supports_stopped_by_sw_breakpoint (void)
5114 return USE_SIGTRAP_SIGINFO
;
5117 /* Implement the to_stopped_by_hw_breakpoint target_ops
5121 linux_stopped_by_hw_breakpoint (void)
5123 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5125 return (lwp
->stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
);
5128 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5132 linux_supports_stopped_by_hw_breakpoint (void)
5134 return USE_SIGTRAP_SIGINFO
;
5138 linux_stopped_by_watchpoint (void)
5140 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5142 return lwp
->stop_reason
== TARGET_STOPPED_BY_WATCHPOINT
;
5146 linux_stopped_data_address (void)
5148 struct lwp_info
*lwp
= get_thread_lwp (current_thread
);
5150 return lwp
->stopped_data_address
;
5153 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5154 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5155 && defined(PT_TEXT_END_ADDR)
5157 /* This is only used for targets that define PT_TEXT_ADDR,
5158 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5159 the target has different ways of acquiring this information, like
5162 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5163 to tell gdb about. */
5166 linux_read_offsets (CORE_ADDR
*text_p
, CORE_ADDR
*data_p
)
5168 unsigned long text
, text_end
, data
;
5169 int pid
= lwpid_of (get_thread_lwp (current_thread
));
5173 text
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_ADDR
,
5174 (PTRACE_TYPE_ARG4
) 0);
5175 text_end
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_TEXT_END_ADDR
,
5176 (PTRACE_TYPE_ARG4
) 0);
5177 data
= ptrace (PTRACE_PEEKUSER
, pid
, (PTRACE_TYPE_ARG3
) PT_DATA_ADDR
,
5178 (PTRACE_TYPE_ARG4
) 0);
5182 /* Both text and data offsets produced at compile-time (and so
5183 used by gdb) are relative to the beginning of the program,
5184 with the data segment immediately following the text segment.
5185 However, the actual runtime layout in memory may put the data
5186 somewhere else, so when we send gdb a data base-address, we
5187 use the real data base address and subtract the compile-time
5188 data base-address from it (which is just the length of the
5189 text segment). BSS immediately follows data in both
5192 *data_p
= data
- (text_end
- text
);
5201 linux_qxfer_osdata (const char *annex
,
5202 unsigned char *readbuf
, unsigned const char *writebuf
,
5203 CORE_ADDR offset
, int len
)
5205 return linux_common_xfer_osdata (annex
, readbuf
, offset
, len
);
5208 /* Convert a native/host siginfo object, into/from the siginfo in the
5209 layout of the inferiors' architecture. */
5212 siginfo_fixup (siginfo_t
*siginfo
, void *inf_siginfo
, int direction
)
5216 if (the_low_target
.siginfo_fixup
!= NULL
)
5217 done
= the_low_target
.siginfo_fixup (siginfo
, inf_siginfo
, direction
);
5219 /* If there was no callback, or the callback didn't do anything,
5220 then just do a straight memcpy. */
5224 memcpy (siginfo
, inf_siginfo
, sizeof (siginfo_t
));
5226 memcpy (inf_siginfo
, siginfo
, sizeof (siginfo_t
));
5231 linux_xfer_siginfo (const char *annex
, unsigned char *readbuf
,
5232 unsigned const char *writebuf
, CORE_ADDR offset
, int len
)
5236 char inf_siginfo
[sizeof (siginfo_t
)];
5238 if (current_thread
== NULL
)
5241 pid
= lwpid_of (current_thread
);
5244 debug_printf ("%s siginfo for lwp %d.\n",
5245 readbuf
!= NULL
? "Reading" : "Writing",
5248 if (offset
>= sizeof (siginfo
))
5251 if (ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
5254 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5255 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5256 inferior with a 64-bit GDBSERVER should look the same as debugging it
5257 with a 32-bit GDBSERVER, we need to convert it. */
5258 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
5260 if (offset
+ len
> sizeof (siginfo
))
5261 len
= sizeof (siginfo
) - offset
;
5263 if (readbuf
!= NULL
)
5264 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
5267 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
5269 /* Convert back to ptrace layout before flushing it out. */
5270 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
5272 if (ptrace (PTRACE_SETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
) != 0)
5279 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5280 so we notice when children change state; as the handler for the
5281 sigsuspend in my_waitpid. */
5284 sigchld_handler (int signo
)
5286 int old_errno
= errno
;
5292 /* fprintf is not async-signal-safe, so call write
5294 if (write (2, "sigchld_handler\n",
5295 sizeof ("sigchld_handler\n") - 1) < 0)
5296 break; /* just ignore */
5300 if (target_is_async_p ())
5301 async_file_mark (); /* trigger a linux_wait */
5307 linux_supports_non_stop (void)
5313 linux_async (int enable
)
5315 int previous
= target_is_async_p ();
5318 debug_printf ("linux_async (%d), previous=%d\n",
5321 if (previous
!= enable
)
5324 sigemptyset (&mask
);
5325 sigaddset (&mask
, SIGCHLD
);
5327 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
5331 if (pipe (linux_event_pipe
) == -1)
5333 linux_event_pipe
[0] = -1;
5334 linux_event_pipe
[1] = -1;
5335 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
5337 warning ("creating event pipe failed.");
5341 fcntl (linux_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
5342 fcntl (linux_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
5344 /* Register the event loop handler. */
5345 add_file_handler (linux_event_pipe
[0],
5346 handle_target_event
, NULL
);
5348 /* Always trigger a linux_wait. */
5353 delete_file_handler (linux_event_pipe
[0]);
5355 close (linux_event_pipe
[0]);
5356 close (linux_event_pipe
[1]);
5357 linux_event_pipe
[0] = -1;
5358 linux_event_pipe
[1] = -1;
5361 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
/* Switch non-stop mode on (NONSTOP != 0) or off, by enabling or
   disabling async target operation.  Returns 0 on success, -1 if the
   async state could not be changed as requested.  */

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);

  if (target_is_async_p () != (nonstop != 0))
    return -1;

  return 0;
}
5380 linux_supports_multi_process (void)
5386 linux_supports_disable_randomization (void)
5388 #ifdef HAVE_PERSONALITY
5396 linux_supports_agent (void)
5402 linux_supports_range_stepping (void)
5404 if (*the_low_target
.supports_range_stepping
== NULL
)
5407 return (*the_low_target
.supports_range_stepping
) ();
5410 /* Enumerate spufs IDs for process PID. */
5412 spu_enumerate_spu_ids (long pid
, unsigned char *buf
, CORE_ADDR offset
, int len
)
5418 struct dirent
*entry
;
5420 sprintf (path
, "/proc/%ld/fd", pid
);
5421 dir
= opendir (path
);
5426 while ((entry
= readdir (dir
)) != NULL
)
5432 fd
= atoi (entry
->d_name
);
5436 sprintf (path
, "/proc/%ld/fd/%d", pid
, fd
);
5437 if (stat (path
, &st
) != 0)
5439 if (!S_ISDIR (st
.st_mode
))
5442 if (statfs (path
, &stfs
) != 0)
5444 if (stfs
.f_type
!= SPUFS_MAGIC
)
5447 if (pos
>= offset
&& pos
+ 4 <= offset
+ len
)
5449 *(unsigned int *)(buf
+ pos
- offset
) = fd
;
5459 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5460 object type, using the /proc file system. */
5462 linux_qxfer_spu (const char *annex
, unsigned char *readbuf
,
5463 unsigned const char *writebuf
,
5464 CORE_ADDR offset
, int len
)
5466 long pid
= lwpid_of (current_thread
);
5471 if (!writebuf
&& !readbuf
)
5479 return spu_enumerate_spu_ids (pid
, readbuf
, offset
, len
);
5482 sprintf (buf
, "/proc/%ld/fd/%s", pid
, annex
);
5483 fd
= open (buf
, writebuf
? O_WRONLY
: O_RDONLY
);
5488 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
5495 ret
= write (fd
, writebuf
, (size_t) len
);
5497 ret
= read (fd
, readbuf
, (size_t) len
);
5503 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5504 struct target_loadseg
5506 /* Core address to which the segment is mapped. */
5508 /* VMA recorded in the program header. */
5510 /* Size of this segment in memory. */
5514 # if defined PT_GETDSBT
5515 struct target_loadmap
5517 /* Protocol version number, must be zero. */
5519 /* Pointer to the DSBT table, its size, and the DSBT index. */
5520 unsigned *dsbt_table
;
5521 unsigned dsbt_size
, dsbt_index
;
5522 /* Number of segments in this map. */
5524 /* The actual memory map. */
5525 struct target_loadseg segs
[/*nsegs*/];
5527 # define LINUX_LOADMAP PT_GETDSBT
5528 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5529 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5531 struct target_loadmap
5533 /* Protocol version number, must be zero. */
5535 /* Number of segments in this map. */
5537 /* The actual memory map. */
5538 struct target_loadseg segs
[/*nsegs*/];
5540 # define LINUX_LOADMAP PTRACE_GETFDPIC
5541 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5542 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5546 linux_read_loadmap (const char *annex
, CORE_ADDR offset
,
5547 unsigned char *myaddr
, unsigned int len
)
5549 int pid
= lwpid_of (current_thread
);
5551 struct target_loadmap
*data
= NULL
;
5552 unsigned int actual_length
, copy_length
;
5554 if (strcmp (annex
, "exec") == 0)
5555 addr
= (int) LINUX_LOADMAP_EXEC
;
5556 else if (strcmp (annex
, "interp") == 0)
5557 addr
= (int) LINUX_LOADMAP_INTERP
;
5561 if (ptrace (LINUX_LOADMAP
, pid
, addr
, &data
) != 0)
5567 actual_length
= sizeof (struct target_loadmap
)
5568 + sizeof (struct target_loadseg
) * data
->nsegs
;
5570 if (offset
< 0 || offset
> actual_length
)
5573 copy_length
= actual_length
- offset
< len
? actual_length
- offset
: len
;
5574 memcpy (myaddr
, (char *) data
+ offset
, copy_length
);
5578 # define linux_read_loadmap NULL
5579 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5582 linux_process_qsupported (const char *query
)
5584 if (the_low_target
.process_qsupported
!= NULL
)
5585 the_low_target
.process_qsupported (query
);
5589 linux_supports_tracepoints (void)
5591 if (*the_low_target
.supports_tracepoints
== NULL
)
5594 return (*the_low_target
.supports_tracepoints
) ();
5598 linux_read_pc (struct regcache
*regcache
)
5600 if (the_low_target
.get_pc
== NULL
)
5603 return (*the_low_target
.get_pc
) (regcache
);
5607 linux_write_pc (struct regcache
*regcache
, CORE_ADDR pc
)
5609 gdb_assert (the_low_target
.set_pc
!= NULL
);
5611 (*the_low_target
.set_pc
) (regcache
, pc
);
5615 linux_thread_stopped (struct thread_info
*thread
)
5617 return get_thread_lwp (thread
)->stopped
;
5620 /* This exposes stop-all-threads functionality to other modules. */
5623 linux_pause_all (int freeze
)
5625 stop_all_lwps (freeze
, NULL
);
5628 /* This exposes unstop-all-threads functionality to other gdbserver
5632 linux_unpause_all (int unfreeze
)
5634 unstop_all_lwps (unfreeze
, NULL
);
5638 linux_prepare_to_access_memory (void)
5640 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5643 linux_pause_all (1);
5648 linux_done_accessing_memory (void)
5650 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5653 linux_unpause_all (1);
5657 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
5658 CORE_ADDR collector
,
5661 CORE_ADDR
*jump_entry
,
5662 CORE_ADDR
*trampoline
,
5663 ULONGEST
*trampoline_size
,
5664 unsigned char *jjump_pad_insn
,
5665 ULONGEST
*jjump_pad_insn_size
,
5666 CORE_ADDR
*adjusted_insn_addr
,
5667 CORE_ADDR
*adjusted_insn_addr_end
,
5670 return (*the_low_target
.install_fast_tracepoint_jump_pad
)
5671 (tpoint
, tpaddr
, collector
, lockaddr
, orig_size
,
5672 jump_entry
, trampoline
, trampoline_size
,
5673 jjump_pad_insn
, jjump_pad_insn_size
,
5674 adjusted_insn_addr
, adjusted_insn_addr_end
,
5678 static struct emit_ops
*
5679 linux_emit_ops (void)
5681 if (the_low_target
.emit_ops
!= NULL
)
5682 return (*the_low_target
.emit_ops
) ();
5688 linux_get_min_fast_tracepoint_insn_len (void)
5690 return (*the_low_target
.get_min_fast_tracepoint_insn_len
) ();
5693 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5696 get_phdr_phnum_from_proc_auxv (const int pid
, const int is_elf64
,
5697 CORE_ADDR
*phdr_memaddr
, int *num_phdr
)
5699 char filename
[PATH_MAX
];
5701 const int auxv_size
= is_elf64
5702 ? sizeof (Elf64_auxv_t
) : sizeof (Elf32_auxv_t
);
5703 char buf
[sizeof (Elf64_auxv_t
)]; /* The larger of the two. */
5705 xsnprintf (filename
, sizeof filename
, "/proc/%d/auxv", pid
);
5707 fd
= open (filename
, O_RDONLY
);
5713 while (read (fd
, buf
, auxv_size
) == auxv_size
5714 && (*phdr_memaddr
== 0 || *num_phdr
== 0))
5718 Elf64_auxv_t
*const aux
= (Elf64_auxv_t
*) buf
;
5720 switch (aux
->a_type
)
5723 *phdr_memaddr
= aux
->a_un
.a_val
;
5726 *num_phdr
= aux
->a_un
.a_val
;
5732 Elf32_auxv_t
*const aux
= (Elf32_auxv_t
*) buf
;
5734 switch (aux
->a_type
)
5737 *phdr_memaddr
= aux
->a_un
.a_val
;
5740 *num_phdr
= aux
->a_un
.a_val
;
5748 if (*phdr_memaddr
== 0 || *num_phdr
== 0)
5750 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5751 "phdr_memaddr = %ld, phdr_num = %d",
5752 (long) *phdr_memaddr
, *num_phdr
);
5759 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5762 get_dynamic (const int pid
, const int is_elf64
)
5764 CORE_ADDR phdr_memaddr
, relocation
;
5766 unsigned char *phdr_buf
;
5767 const int phdr_size
= is_elf64
? sizeof (Elf64_Phdr
) : sizeof (Elf32_Phdr
);
5769 if (get_phdr_phnum_from_proc_auxv (pid
, is_elf64
, &phdr_memaddr
, &num_phdr
))
5772 gdb_assert (num_phdr
< 100); /* Basic sanity check. */
5773 phdr_buf
= alloca (num_phdr
* phdr_size
);
5775 if (linux_read_memory (phdr_memaddr
, phdr_buf
, num_phdr
* phdr_size
))
5778 /* Compute relocation: it is expected to be 0 for "regular" executables,
5779 non-zero for PIE ones. */
5781 for (i
= 0; relocation
== -1 && i
< num_phdr
; i
++)
5784 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
5786 if (p
->p_type
== PT_PHDR
)
5787 relocation
= phdr_memaddr
- p
->p_vaddr
;
5791 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
5793 if (p
->p_type
== PT_PHDR
)
5794 relocation
= phdr_memaddr
- p
->p_vaddr
;
5797 if (relocation
== -1)
5799 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
5800 any real world executables, including PIE executables, have always
5801 PT_PHDR present. PT_PHDR is not present in some shared libraries or
5802 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
5803 or present DT_DEBUG anyway (fpc binaries are statically linked).
5805 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5807 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5812 for (i
= 0; i
< num_phdr
; i
++)
5816 Elf64_Phdr
*const p
= (Elf64_Phdr
*) (phdr_buf
+ i
* phdr_size
);
5818 if (p
->p_type
== PT_DYNAMIC
)
5819 return p
->p_vaddr
+ relocation
;
5823 Elf32_Phdr
*const p
= (Elf32_Phdr
*) (phdr_buf
+ i
* phdr_size
);
5825 if (p
->p_type
== PT_DYNAMIC
)
5826 return p
->p_vaddr
+ relocation
;
5833 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5834 can be 0 if the inferior does not yet have the library list initialized.
5835 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5836 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5839 get_r_debug (const int pid
, const int is_elf64
)
5841 CORE_ADDR dynamic_memaddr
;
5842 const int dyn_size
= is_elf64
? sizeof (Elf64_Dyn
) : sizeof (Elf32_Dyn
);
5843 unsigned char buf
[sizeof (Elf64_Dyn
)]; /* The larger of the two. */
5846 dynamic_memaddr
= get_dynamic (pid
, is_elf64
);
5847 if (dynamic_memaddr
== 0)
5850 while (linux_read_memory (dynamic_memaddr
, buf
, dyn_size
) == 0)
5854 Elf64_Dyn
*const dyn
= (Elf64_Dyn
*) buf
;
5855 #ifdef DT_MIPS_RLD_MAP
5859 unsigned char buf
[sizeof (Elf64_Xword
)];
5863 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
5865 if (linux_read_memory (dyn
->d_un
.d_val
,
5866 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
5871 #endif /* DT_MIPS_RLD_MAP */
5873 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
5874 map
= dyn
->d_un
.d_val
;
5876 if (dyn
->d_tag
== DT_NULL
)
5881 Elf32_Dyn
*const dyn
= (Elf32_Dyn
*) buf
;
5882 #ifdef DT_MIPS_RLD_MAP
5886 unsigned char buf
[sizeof (Elf32_Word
)];
5890 if (dyn
->d_tag
== DT_MIPS_RLD_MAP
)
5892 if (linux_read_memory (dyn
->d_un
.d_val
,
5893 rld_map
.buf
, sizeof (rld_map
.buf
)) == 0)
5898 #endif /* DT_MIPS_RLD_MAP */
5900 if (dyn
->d_tag
== DT_DEBUG
&& map
== -1)
5901 map
= dyn
->d_un
.d_val
;
5903 if (dyn
->d_tag
== DT_NULL
)
5907 dynamic_memaddr
+= dyn_size
;
5913 /* Read one pointer from MEMADDR in the inferior. */
5916 read_one_ptr (CORE_ADDR memaddr
, CORE_ADDR
*ptr
, int ptr_size
)
5920 /* Go through a union so this works on either big or little endian
5921 hosts, when the inferior's pointer size is smaller than the size
5922 of CORE_ADDR. It is assumed the inferior's endianness is the
5923 same of the superior's. */
5926 CORE_ADDR core_addr
;
5931 ret
= linux_read_memory (memaddr
, &addr
.uc
, ptr_size
);
5934 if (ptr_size
== sizeof (CORE_ADDR
))
5935 *ptr
= addr
.core_addr
;
5936 else if (ptr_size
== sizeof (unsigned int))
5939 gdb_assert_not_reached ("unhandled pointer size");
5944 struct link_map_offsets
5946 /* Offset and size of r_debug.r_version. */
5947 int r_version_offset
;
5949 /* Offset and size of r_debug.r_map. */
5952 /* Offset to l_addr field in struct link_map. */
5955 /* Offset to l_name field in struct link_map. */
5958 /* Offset to l_ld field in struct link_map. */
5961 /* Offset to l_next field in struct link_map. */
5964 /* Offset to l_prev field in struct link_map. */
5968 /* Construct qXfer:libraries-svr4:read reply. */
5971 linux_qxfer_libraries_svr4 (const char *annex
, unsigned char *readbuf
,
5972 unsigned const char *writebuf
,
5973 CORE_ADDR offset
, int len
)
5976 unsigned document_len
;
5977 struct process_info_private
*const priv
= current_process ()->priv
;
5978 char filename
[PATH_MAX
];
5981 static const struct link_map_offsets lmo_32bit_offsets
=
5983 0, /* r_version offset. */
5984 4, /* r_debug.r_map offset. */
5985 0, /* l_addr offset in link_map. */
5986 4, /* l_name offset in link_map. */
5987 8, /* l_ld offset in link_map. */
5988 12, /* l_next offset in link_map. */
5989 16 /* l_prev offset in link_map. */
5992 static const struct link_map_offsets lmo_64bit_offsets
=
5994 0, /* r_version offset. */
5995 8, /* r_debug.r_map offset. */
5996 0, /* l_addr offset in link_map. */
5997 8, /* l_name offset in link_map. */
5998 16, /* l_ld offset in link_map. */
5999 24, /* l_next offset in link_map. */
6000 32 /* l_prev offset in link_map. */
6002 const struct link_map_offsets
*lmo
;
6003 unsigned int machine
;
6005 CORE_ADDR lm_addr
= 0, lm_prev
= 0;
6006 int allocated
= 1024;
6008 CORE_ADDR l_name
, l_addr
, l_ld
, l_next
, l_prev
;
6009 int header_done
= 0;
6011 if (writebuf
!= NULL
)
6013 if (readbuf
== NULL
)
6016 pid
= lwpid_of (current_thread
);
6017 xsnprintf (filename
, sizeof filename
, "/proc/%d/exe", pid
);
6018 is_elf64
= elf_64_file_p (filename
, &machine
);
6019 lmo
= is_elf64
? &lmo_64bit_offsets
: &lmo_32bit_offsets
;
6020 ptr_size
= is_elf64
? 8 : 4;
6022 while (annex
[0] != '\0')
6028 sep
= strchr (annex
, '=');
6033 if (len
== 5 && startswith (annex
, "start"))
6035 else if (len
== 4 && startswith (annex
, "prev"))
6039 annex
= strchr (sep
, ';');
6046 annex
= decode_address_to_semicolon (addrp
, sep
+ 1);
6053 if (priv
->r_debug
== 0)
6054 priv
->r_debug
= get_r_debug (pid
, is_elf64
);
6056 /* We failed to find DT_DEBUG. Such situation will not change
6057 for this inferior - do not retry it. Report it to GDB as
6058 E01, see for the reasons at the GDB solib-svr4.c side. */
6059 if (priv
->r_debug
== (CORE_ADDR
) -1)
6062 if (priv
->r_debug
!= 0)
6064 if (linux_read_memory (priv
->r_debug
+ lmo
->r_version_offset
,
6065 (unsigned char *) &r_version
,
6066 sizeof (r_version
)) != 0
6069 warning ("unexpected r_debug version %d", r_version
);
6071 else if (read_one_ptr (priv
->r_debug
+ lmo
->r_map_offset
,
6072 &lm_addr
, ptr_size
) != 0)
6074 warning ("unable to read r_map from 0x%lx",
6075 (long) priv
->r_debug
+ lmo
->r_map_offset
);
6080 document
= xmalloc (allocated
);
6081 strcpy (document
, "<library-list-svr4 version=\"1.0\"");
6082 p
= document
+ strlen (document
);
6085 && read_one_ptr (lm_addr
+ lmo
->l_name_offset
,
6086 &l_name
, ptr_size
) == 0
6087 && read_one_ptr (lm_addr
+ lmo
->l_addr_offset
,
6088 &l_addr
, ptr_size
) == 0
6089 && read_one_ptr (lm_addr
+ lmo
->l_ld_offset
,
6090 &l_ld
, ptr_size
) == 0
6091 && read_one_ptr (lm_addr
+ lmo
->l_prev_offset
,
6092 &l_prev
, ptr_size
) == 0
6093 && read_one_ptr (lm_addr
+ lmo
->l_next_offset
,
6094 &l_next
, ptr_size
) == 0)
6096 unsigned char libname
[PATH_MAX
];
6098 if (lm_prev
!= l_prev
)
6100 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6101 (long) lm_prev
, (long) l_prev
);
6105 /* Ignore the first entry even if it has valid name as the first entry
6106 corresponds to the main executable. The first entry should not be
6107 skipped if the dynamic loader was loaded late by a static executable
6108 (see solib-svr4.c parameter ignore_first). But in such case the main
6109 executable does not have PT_DYNAMIC present and this function already
6110 exited above due to failed get_r_debug. */
6113 sprintf (p
, " main-lm=\"0x%lx\"", (unsigned long) lm_addr
);
6118 /* Not checking for error because reading may stop before
6119 we've got PATH_MAX worth of characters. */
6121 linux_read_memory (l_name
, libname
, sizeof (libname
) - 1);
6122 libname
[sizeof (libname
) - 1] = '\0';
6123 if (libname
[0] != '\0')
6125 /* 6x the size for xml_escape_text below. */
6126 size_t len
= 6 * strlen ((char *) libname
);
6131 /* Terminate `<library-list-svr4'. */
6136 while (allocated
< p
- document
+ len
+ 200)
6138 /* Expand to guarantee sufficient storage. */
6139 uintptr_t document_len
= p
- document
;
6141 document
= xrealloc (document
, 2 * allocated
);
6143 p
= document
+ document_len
;
6146 name
= xml_escape_text ((char *) libname
);
6147 p
+= sprintf (p
, "<library name=\"%s\" lm=\"0x%lx\" "
6148 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6149 name
, (unsigned long) lm_addr
,
6150 (unsigned long) l_addr
, (unsigned long) l_ld
);
6161 /* Empty list; terminate `<library-list-svr4'. */
6165 strcpy (p
, "</library-list-svr4>");
6167 document_len
= strlen (document
);
6168 if (offset
< document_len
)
6169 document_len
-= offset
;
6172 if (len
> document_len
)
6175 memcpy (readbuf
, document
+ offset
, len
);
6181 #ifdef HAVE_LINUX_BTRACE
6183 /* See to_enable_btrace target method. */
6185 static struct btrace_target_info
*
6186 linux_low_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
6188 struct btrace_target_info
*tinfo
;
6190 tinfo
= linux_enable_btrace (ptid
, conf
);
6192 if (tinfo
!= NULL
&& tinfo
->ptr_bits
== 0)
6194 struct thread_info
*thread
= find_thread_ptid (ptid
);
6195 struct regcache
*regcache
= get_thread_regcache (thread
, 0);
6197 tinfo
->ptr_bits
= register_size (regcache
->tdesc
, 0) * 8;
6203 /* See to_disable_btrace target method. */
6206 linux_low_disable_btrace (struct btrace_target_info
*tinfo
)
6208 enum btrace_error err
;
6210 err
= linux_disable_btrace (tinfo
);
6211 return (err
== BTRACE_ERR_NONE
? 0 : -1);
6214 /* See to_read_btrace target method. */
6217 linux_low_read_btrace (struct btrace_target_info
*tinfo
, struct buffer
*buffer
,
6220 struct btrace_data btrace
;
6221 struct btrace_block
*block
;
6222 enum btrace_error err
;
6225 btrace_data_init (&btrace
);
6227 err
= linux_read_btrace (&btrace
, tinfo
, type
);
6228 if (err
!= BTRACE_ERR_NONE
)
6230 if (err
== BTRACE_ERR_OVERFLOW
)
6231 buffer_grow_str0 (buffer
, "E.Overflow.");
6233 buffer_grow_str0 (buffer
, "E.Generic Error.");
6235 btrace_data_fini (&btrace
);
6239 switch (btrace
.format
)
6241 case BTRACE_FORMAT_NONE
:
6242 buffer_grow_str0 (buffer
, "E.No Trace.");
6245 case BTRACE_FORMAT_BTS
:
6246 buffer_grow_str (buffer
, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6247 buffer_grow_str (buffer
, "<btrace version=\"1.0\">\n");
6250 VEC_iterate (btrace_block_s
, btrace
.variant
.bts
.blocks
, i
, block
);
6252 buffer_xml_printf (buffer
, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6253 paddress (block
->begin
), paddress (block
->end
));
6255 buffer_grow_str0 (buffer
, "</btrace>\n");
6259 buffer_grow_str0 (buffer
, "E.Unknown Trace Format.");
6261 btrace_data_fini (&btrace
);
6265 btrace_data_fini (&btrace
);
6269 /* See to_btrace_conf target method. */
6272 linux_low_btrace_conf (const struct btrace_target_info
*tinfo
,
6273 struct buffer
*buffer
)
6275 const struct btrace_config
*conf
;
6277 buffer_grow_str (buffer
, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6278 buffer_grow_str (buffer
, "<btrace-conf version=\"1.0\">\n");
6280 conf
= linux_btrace_conf (tinfo
);
6283 switch (conf
->format
)
6285 case BTRACE_FORMAT_NONE
:
6288 case BTRACE_FORMAT_BTS
:
6289 buffer_xml_printf (buffer
, "<bts");
6290 buffer_xml_printf (buffer
, " size=\"0x%x\"", conf
->bts
.size
);
6291 buffer_xml_printf (buffer
, " />\n");
6296 buffer_grow_str0 (buffer
, "</btrace-conf>\n");
6299 #endif /* HAVE_LINUX_BTRACE */
6301 /* See nat/linux-nat.h. */
6304 current_lwp_ptid (void)
6306 return ptid_of (current_thread
);
6309 static struct target_ops linux_target_ops
= {
6310 linux_create_inferior
,
6319 linux_fetch_registers
,
6320 linux_store_registers
,
6321 linux_prepare_to_access_memory
,
6322 linux_done_accessing_memory
,
6325 linux_look_up_symbols
,
6326 linux_request_interrupt
,
6328 linux_supports_z_point_type
,
6331 linux_stopped_by_sw_breakpoint
,
6332 linux_supports_stopped_by_sw_breakpoint
,
6333 linux_stopped_by_hw_breakpoint
,
6334 linux_supports_stopped_by_hw_breakpoint
,
6335 linux_stopped_by_watchpoint
,
6336 linux_stopped_data_address
,
6337 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6338 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6339 && defined(PT_TEXT_END_ADDR)
6344 #ifdef USE_THREAD_DB
6345 thread_db_get_tls_address
,
6350 hostio_last_error_from_errno
,
6353 linux_supports_non_stop
,
6355 linux_start_non_stop
,
6356 linux_supports_multi_process
,
6357 #ifdef USE_THREAD_DB
6358 thread_db_handle_monitor_command
,
6362 linux_common_core_of_thread
,
6364 linux_process_qsupported
,
6365 linux_supports_tracepoints
,
6368 linux_thread_stopped
,
6372 linux_stabilize_threads
,
6373 linux_install_fast_tracepoint_jump_pad
,
6375 linux_supports_disable_randomization
,
6376 linux_get_min_fast_tracepoint_insn_len
,
6377 linux_qxfer_libraries_svr4
,
6378 linux_supports_agent
,
6379 #ifdef HAVE_LINUX_BTRACE
6380 linux_supports_btrace
,
6381 linux_low_enable_btrace
,
6382 linux_low_disable_btrace
,
6383 linux_low_read_btrace
,
6384 linux_low_btrace_conf
,
6392 linux_supports_range_stepping
,
/* One-time signal setup: ignore the signal glibc's LinuxThreads uses
   internally so it does not disturb the debug session.  */

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}
6405 #ifdef HAVE_LINUX_REGSETS
6407 initialize_regsets_info (struct regsets_info
*info
)
6409 for (info
->num_regsets
= 0;
6410 info
->regsets
[info
->num_regsets
].size
>= 0;
6411 info
->num_regsets
++)
6417 initialize_low (void)
6419 struct sigaction sigchld_action
;
6420 memset (&sigchld_action
, 0, sizeof (sigchld_action
));
6421 set_target_ops (&linux_target_ops
);
6422 set_breakpoint_data (the_low_target
.breakpoint
,
6423 the_low_target
.breakpoint_len
);
6424 linux_init_signals ();
6425 linux_ptrace_init_warnings ();
6427 sigchld_action
.sa_handler
= sigchld_handler
;
6428 sigemptyset (&sigchld_action
.sa_mask
);
6429 sigchld_action
.sa_flags
= SA_RESTART
;
6430 sigaction (SIGCHLD
, &sigchld_action
, NULL
);
6432 initialize_low_arch ();