1 /* Target-struct-independent code to start (run) and stop an inferior
4 Copyright (C) 1986-2020 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
27 #include "breakpoint.h"
31 #include "target-connection.h"
32 #include "gdbthread.h"
39 #include "observable.h"
44 #include "mi/mi-common.h"
45 #include "event-top.h"
47 #include "record-full.h"
48 #include "inline-frame.h"
50 #include "tracepoint.h"
54 #include "completer.h"
55 #include "target-descriptions.h"
56 #include "target-dcache.h"
59 #include "gdbsupport/event-loop.h"
60 #include "thread-fsm.h"
61 #include "gdbsupport/enum-flags.h"
62 #include "progspace-and-thread.h"
63 #include "gdbsupport/gdb_optional.h"
64 #include "arch-utils.h"
65 #include "gdbsupport/scope-exit.h"
66 #include "gdbsupport/forward-scope-exit.h"
67 #include "gdbsupport/gdb_select.h"
68 #include <unordered_map>
69 #include "async-event.h"
70 #include "gdbsupport/selftest.h"
71 #include "scoped-mock-context.h"
72 #include "test-target.h"
74 /* Prototypes for local functions */
76 static void sig_print_info (enum gdb_signal
);
78 static void sig_print_header (void);
80 static void follow_inferior_reset_breakpoints (void);
82 static int currently_stepping (struct thread_info
*tp
);
84 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info
*);
86 static void insert_step_resume_breakpoint_at_caller (struct frame_info
*);
88 static void insert_longjmp_resume_breakpoint (struct gdbarch
*, CORE_ADDR
);
90 static int maybe_software_singlestep (struct gdbarch
*gdbarch
, CORE_ADDR pc
);
92 static void resume (gdb_signal sig
);
94 static void wait_for_inferior (inferior
*inf
);
96 /* Asynchronous signal handler registered as event loop source for
97 when we have pending events ready to be passed to the core. */
98 static struct async_event_handler
*infrun_async_inferior_event_token
;
100 /* Stores whether infrun_async was previously enabled or disabled.
101 Starts off as -1, indicating "never enabled/disabled". */
102 static int infrun_is_async
= -1;
107 infrun_async (int enable
)
109 if (infrun_is_async
!= enable
)
111 infrun_is_async
= enable
;
114 fprintf_unfiltered (gdb_stdlog
,
115 "infrun: infrun_async(%d)\n",
119 mark_async_event_handler (infrun_async_inferior_event_token
);
121 clear_async_event_handler (infrun_async_inferior_event_token
);
128 mark_infrun_async_event_handler (void)
130 mark_async_event_handler (infrun_async_inferior_event_token
);
133 /* When set, stop the 'step' command if we enter a function which has
134 no line number information. The normal behavior is that we step
135 over such function. */
136 bool step_stop_if_no_debug
= false;
138 show_step_stop_if_no_debug (struct ui_file
*file
, int from_tty
,
139 struct cmd_list_element
*c
, const char *value
)
141 fprintf_filtered (file
, _("Mode of the step operation is %s.\n"), value
);
144 /* proceed and normal_stop use this to notify the user when the
145 inferior stopped in a different thread than it had been running
148 static ptid_t previous_inferior_ptid
;
150 /* If set (default for legacy reasons), when following a fork, GDB
151 will detach from one of the fork branches, child or parent.
152 Exactly which branch is detached depends on 'set follow-fork-mode'
155 static bool detach_fork
= true;
157 bool debug_displaced
= false;
159 show_debug_displaced (struct ui_file
*file
, int from_tty
,
160 struct cmd_list_element
*c
, const char *value
)
162 fprintf_filtered (file
, _("Displace stepping debugging is %s.\n"), value
);
165 unsigned int debug_infrun
= 0;
167 show_debug_infrun (struct ui_file
*file
, int from_tty
,
168 struct cmd_list_element
*c
, const char *value
)
170 fprintf_filtered (file
, _("Inferior debugging is %s.\n"), value
);
174 /* Support for disabling address space randomization. */
176 bool disable_randomization
= true;
179 show_disable_randomization (struct ui_file
*file
, int from_tty
,
180 struct cmd_list_element
*c
, const char *value
)
182 if (target_supports_disable_randomization ())
183 fprintf_filtered (file
,
184 _("Disabling randomization of debuggee's "
185 "virtual address space is %s.\n"),
188 fputs_filtered (_("Disabling randomization of debuggee's "
189 "virtual address space is unsupported on\n"
190 "this platform.\n"), file
);
194 set_disable_randomization (const char *args
, int from_tty
,
195 struct cmd_list_element
*c
)
197 if (!target_supports_disable_randomization ())
198 error (_("Disabling randomization of debuggee's "
199 "virtual address space is unsupported on\n"
203 /* User interface for non-stop mode. */
205 bool non_stop
= false;
206 static bool non_stop_1
= false;
209 set_non_stop (const char *args
, int from_tty
,
210 struct cmd_list_element
*c
)
212 if (target_has_execution
)
214 non_stop_1
= non_stop
;
215 error (_("Cannot change this setting while the inferior is running."));
218 non_stop
= non_stop_1
;
222 show_non_stop (struct ui_file
*file
, int from_tty
,
223 struct cmd_list_element
*c
, const char *value
)
225 fprintf_filtered (file
,
226 _("Controlling the inferior in non-stop mode is %s.\n"),
230 /* "Observer mode" is somewhat like a more extreme version of
231 non-stop, in which all GDB operations that might affect the
232 target's execution have been disabled. */
234 bool observer_mode
= false;
235 static bool observer_mode_1
= false;
238 set_observer_mode (const char *args
, int from_tty
,
239 struct cmd_list_element
*c
)
241 if (target_has_execution
)
243 observer_mode_1
= observer_mode
;
244 error (_("Cannot change this setting while the inferior is running."));
247 observer_mode
= observer_mode_1
;
249 may_write_registers
= !observer_mode
;
250 may_write_memory
= !observer_mode
;
251 may_insert_breakpoints
= !observer_mode
;
252 may_insert_tracepoints
= !observer_mode
;
253 /* We can insert fast tracepoints in or out of observer mode,
254 but enable them if we're going into this mode. */
256 may_insert_fast_tracepoints
= true;
257 may_stop
= !observer_mode
;
258 update_target_permissions ();
260 /* Going *into* observer mode we must force non-stop, then
261 going out we leave it that way. */
264 pagination_enabled
= 0;
265 non_stop
= non_stop_1
= true;
269 printf_filtered (_("Observer mode is now %s.\n"),
270 (observer_mode
? "on" : "off"));
274 show_observer_mode (struct ui_file
*file
, int from_tty
,
275 struct cmd_list_element
*c
, const char *value
)
277 fprintf_filtered (file
, _("Observer mode is %s.\n"), value
);
280 /* This updates the value of observer mode based on changes in
281 permissions. Note that we are deliberately ignoring the values of
282 may-write-registers and may-write-memory, since the user may have
283 reason to enable these during a session, for instance to turn on a
284 debugging-related global. */
287 update_observer_mode (void)
289 bool newval
= (!may_insert_breakpoints
290 && !may_insert_tracepoints
291 && may_insert_fast_tracepoints
295 /* Let the user know if things change. */
296 if (newval
!= observer_mode
)
297 printf_filtered (_("Observer mode is now %s.\n"),
298 (newval
? "on" : "off"));
300 observer_mode
= observer_mode_1
= newval
;
303 /* Tables of how to react to signals; the user sets them. */
305 static unsigned char signal_stop
[GDB_SIGNAL_LAST
];
306 static unsigned char signal_print
[GDB_SIGNAL_LAST
];
307 static unsigned char signal_program
[GDB_SIGNAL_LAST
];
309 /* Table of signals that are registered with "catch signal". A
310 non-zero entry indicates that the signal is caught by some "catch
312 static unsigned char signal_catch
[GDB_SIGNAL_LAST
];
314 /* Table of signals that the target may silently handle.
315 This is automatically determined from the flags above,
316 and simply cached here. */
317 static unsigned char signal_pass
[GDB_SIGNAL_LAST
];
/* Set FLAGS[signum] = 1 for every signal number below NSIGS whose
   entry in SIGS is nonzero.  Wrapped in do/while (0) so the macro is
   a single statement (safe after an unbraced `if').  */
#define SET_SIGS(nsigs,sigs,flags) \
  do \
    { \
      int signum = (nsigs); \
      while (signum-- > 0) \
	if ((sigs)[signum]) \
	  (flags)[signum] = 1; \
    } \
  while (0)

/* Like SET_SIGS, but clears the selected FLAGS entries instead.  */
#define UNSET_SIGS(nsigs,sigs,flags) \
  do \
    { \
      int signum = (nsigs); \
      while (signum-- > 0) \
	if ((sigs)[signum]) \
	  (flags)[signum] = 0; \
    } \
  while (0)
335 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
336 this function is to avoid exporting `signal_program'. */
339 update_signals_program_target (void)
341 target_program_signals (signal_program
);
344 /* Value to pass to target_resume() to cause all threads to resume. */
346 #define RESUME_ALL minus_one_ptid
348 /* Command list pointer for the "stop" placeholder. */
350 static struct cmd_list_element
*stop_command
;
352 /* Nonzero if we want to give control to the user when we're notified
353 of shared library events by the dynamic linker. */
354 int stop_on_solib_events
;
356 /* Enable or disable optional shared library event breakpoints
357 as appropriate when the above flag is changed. */
360 set_stop_on_solib_events (const char *args
,
361 int from_tty
, struct cmd_list_element
*c
)
363 update_solib_breakpoints ();
367 show_stop_on_solib_events (struct ui_file
*file
, int from_tty
,
368 struct cmd_list_element
*c
, const char *value
)
370 fprintf_filtered (file
, _("Stopping for shared library events is %s.\n"),
374 /* Nonzero after stop if current stack frame should be printed. */
376 static int stop_print_frame
;
378 /* This is a cached copy of the target/ptid/waitstatus of the last
379 event returned by target_wait()/deprecated_target_wait_hook().
380 This information is returned by get_last_target_status(). */
381 static process_stratum_target
*target_last_proc_target
;
382 static ptid_t target_last_wait_ptid
;
383 static struct target_waitstatus target_last_waitstatus
;
385 void init_thread_stepping_state (struct thread_info
*tss
);
387 static const char follow_fork_mode_child
[] = "child";
388 static const char follow_fork_mode_parent
[] = "parent";
390 static const char *const follow_fork_mode_kind_names
[] = {
391 follow_fork_mode_child
,
392 follow_fork_mode_parent
,
396 static const char *follow_fork_mode_string
= follow_fork_mode_parent
;
398 show_follow_fork_mode_string (struct ui_file
*file
, int from_tty
,
399 struct cmd_list_element
*c
, const char *value
)
401 fprintf_filtered (file
,
402 _("Debugger response to a program "
403 "call of fork or vfork is \"%s\".\n"),
408 /* Handle changes to the inferior list based on the type of fork,
409 which process is being followed, and whether the other process
410 should be detached. On entry inferior_ptid must be the ptid of
411 the fork parent. At return inferior_ptid is the ptid of the
412 followed inferior. */
415 follow_fork_inferior (bool follow_child
, bool detach_fork
)
418 ptid_t parent_ptid
, child_ptid
;
420 has_vforked
= (inferior_thread ()->pending_follow
.kind
421 == TARGET_WAITKIND_VFORKED
);
422 parent_ptid
= inferior_ptid
;
423 child_ptid
= inferior_thread ()->pending_follow
.value
.related_pid
;
426 && !non_stop
/* Non-stop always resumes both branches. */
427 && current_ui
->prompt_state
== PROMPT_BLOCKED
428 && !(follow_child
|| detach_fork
|| sched_multi
))
430 /* The parent stays blocked inside the vfork syscall until the
431 child execs or exits. If we don't let the child run, then
432 the parent stays blocked. If we're telling the parent to run
433 in the foreground, the user will not be able to ctrl-c to get
434 back the terminal, effectively hanging the debug session. */
435 fprintf_filtered (gdb_stderr
, _("\
436 Can not resume the parent process over vfork in the foreground while\n\
437 holding the child stopped. Try \"set detach-on-fork\" or \
438 \"set schedule-multiple\".\n"));
444 /* Detach new forked process? */
447 /* Before detaching from the child, remove all breakpoints
448 from it. If we forked, then this has already been taken
449 care of by infrun.c. If we vforked however, any
450 breakpoint inserted in the parent is visible in the
451 child, even those added while stopped in a vfork
452 catchpoint. This will remove the breakpoints from the
453 parent also, but they'll be reinserted below. */
456 /* Keep breakpoints list in sync. */
457 remove_breakpoints_inf (current_inferior ());
460 if (print_inferior_events
)
462 /* Ensure that we have a process ptid. */
463 ptid_t process_ptid
= ptid_t (child_ptid
.pid ());
465 target_terminal::ours_for_output ();
466 fprintf_filtered (gdb_stdlog
,
467 _("[Detaching after %s from child %s]\n"),
468 has_vforked
? "vfork" : "fork",
469 target_pid_to_str (process_ptid
).c_str ());
474 struct inferior
*parent_inf
, *child_inf
;
476 /* Add process to GDB's tables. */
477 child_inf
= add_inferior (child_ptid
.pid ());
479 parent_inf
= current_inferior ();
480 child_inf
->attach_flag
= parent_inf
->attach_flag
;
481 copy_terminal_info (child_inf
, parent_inf
);
482 child_inf
->gdbarch
= parent_inf
->gdbarch
;
483 copy_inferior_target_desc_info (child_inf
, parent_inf
);
485 scoped_restore_current_pspace_and_thread restore_pspace_thread
;
487 set_current_inferior (child_inf
);
488 switch_to_no_thread ();
489 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
490 push_target (parent_inf
->process_target ());
491 thread_info
*child_thr
492 = add_thread_silent (child_inf
->process_target (), child_ptid
);
494 /* If this is a vfork child, then the address-space is
495 shared with the parent. */
498 child_inf
->pspace
= parent_inf
->pspace
;
499 child_inf
->aspace
= parent_inf
->aspace
;
503 /* The parent will be frozen until the child is done
504 with the shared region. Keep track of the
506 child_inf
->vfork_parent
= parent_inf
;
507 child_inf
->pending_detach
= 0;
508 parent_inf
->vfork_child
= child_inf
;
509 parent_inf
->pending_detach
= 0;
511 /* Now that the inferiors and program spaces are all
512 wired up, we can switch to the child thread (which
513 switches inferior and program space too). */
514 switch_to_thread (child_thr
);
518 child_inf
->aspace
= new_address_space ();
519 child_inf
->pspace
= new program_space (child_inf
->aspace
);
520 child_inf
->removable
= 1;
521 set_current_program_space (child_inf
->pspace
);
522 clone_program_space (child_inf
->pspace
, parent_inf
->pspace
);
524 /* solib_create_inferior_hook relies on the current
526 switch_to_thread (child_thr
);
528 /* Let the shared library layer (e.g., solib-svr4) learn
529 about this new process, relocate the cloned exec, pull
530 in shared libraries, and install the solib event
531 breakpoint. If a "cloned-VM" event was propagated
532 better throughout the core, this wouldn't be
534 solib_create_inferior_hook (0);
540 struct inferior
*parent_inf
;
542 parent_inf
= current_inferior ();
544 /* If we detached from the child, then we have to be careful
545 to not insert breakpoints in the parent until the child
546 is done with the shared memory region. However, if we're
547 staying attached to the child, then we can and should
548 insert breakpoints, so that we can debug it. A
549 subsequent child exec or exit is enough to know when does
550 the child stops using the parent's address space. */
551 parent_inf
->waiting_for_vfork_done
= detach_fork
;
552 parent_inf
->pspace
->breakpoints_not_allowed
= detach_fork
;
557 /* Follow the child. */
558 struct inferior
*parent_inf
, *child_inf
;
559 struct program_space
*parent_pspace
;
561 if (print_inferior_events
)
563 std::string parent_pid
= target_pid_to_str (parent_ptid
);
564 std::string child_pid
= target_pid_to_str (child_ptid
);
566 target_terminal::ours_for_output ();
567 fprintf_filtered (gdb_stdlog
,
568 _("[Attaching after %s %s to child %s]\n"),
570 has_vforked
? "vfork" : "fork",
574 /* Add the new inferior first, so that the target_detach below
575 doesn't unpush the target. */
577 child_inf
= add_inferior (child_ptid
.pid ());
579 parent_inf
= current_inferior ();
580 child_inf
->attach_flag
= parent_inf
->attach_flag
;
581 copy_terminal_info (child_inf
, parent_inf
);
582 child_inf
->gdbarch
= parent_inf
->gdbarch
;
583 copy_inferior_target_desc_info (child_inf
, parent_inf
);
585 parent_pspace
= parent_inf
->pspace
;
587 process_stratum_target
*target
= parent_inf
->process_target ();
590 /* Hold a strong reference to the target while (maybe)
591 detaching the parent. Otherwise detaching could close the
593 auto target_ref
= target_ops_ref::new_reference (target
);
595 /* If we're vforking, we want to hold on to the parent until
596 the child exits or execs. At child exec or exit time we
597 can remove the old breakpoints from the parent and detach
598 or resume debugging it. Otherwise, detach the parent now;
599 we'll want to reuse it's program/address spaces, but we
600 can't set them to the child before removing breakpoints
601 from the parent, otherwise, the breakpoints module could
602 decide to remove breakpoints from the wrong process (since
603 they'd be assigned to the same address space). */
607 gdb_assert (child_inf
->vfork_parent
== NULL
);
608 gdb_assert (parent_inf
->vfork_child
== NULL
);
609 child_inf
->vfork_parent
= parent_inf
;
610 child_inf
->pending_detach
= 0;
611 parent_inf
->vfork_child
= child_inf
;
612 parent_inf
->pending_detach
= detach_fork
;
613 parent_inf
->waiting_for_vfork_done
= 0;
615 else if (detach_fork
)
617 if (print_inferior_events
)
619 /* Ensure that we have a process ptid. */
620 ptid_t process_ptid
= ptid_t (parent_ptid
.pid ());
622 target_terminal::ours_for_output ();
623 fprintf_filtered (gdb_stdlog
,
624 _("[Detaching after fork from "
626 target_pid_to_str (process_ptid
).c_str ());
629 target_detach (parent_inf
, 0);
633 /* Note that the detach above makes PARENT_INF dangling. */
635 /* Add the child thread to the appropriate lists, and switch
636 to this new thread, before cloning the program space, and
637 informing the solib layer about this new process. */
639 set_current_inferior (child_inf
);
640 push_target (target
);
643 thread_info
*child_thr
= add_thread_silent (target
, child_ptid
);
645 /* If this is a vfork child, then the address-space is shared
646 with the parent. If we detached from the parent, then we can
647 reuse the parent's program/address spaces. */
648 if (has_vforked
|| detach_fork
)
650 child_inf
->pspace
= parent_pspace
;
651 child_inf
->aspace
= child_inf
->pspace
->aspace
;
657 child_inf
->aspace
= new_address_space ();
658 child_inf
->pspace
= new program_space (child_inf
->aspace
);
659 child_inf
->removable
= 1;
660 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
661 set_current_program_space (child_inf
->pspace
);
662 clone_program_space (child_inf
->pspace
, parent_pspace
);
664 /* Let the shared library layer (e.g., solib-svr4) learn
665 about this new process, relocate the cloned exec, pull in
666 shared libraries, and install the solib event breakpoint.
667 If a "cloned-VM" event was propagated better throughout
668 the core, this wouldn't be required. */
669 solib_create_inferior_hook (0);
672 switch_to_thread (child_thr
);
675 return target_follow_fork (follow_child
, detach_fork
);
678 /* Tell the target to follow the fork we're stopped at. Returns true
679 if the inferior should be resumed; false, if the target for some
680 reason decided it's best not to resume. */
685 bool follow_child
= (follow_fork_mode_string
== follow_fork_mode_child
);
686 bool should_resume
= true;
687 struct thread_info
*tp
;
689 /* Copy user stepping state to the new inferior thread. FIXME: the
690 followed fork child thread should have a copy of most of the
691 parent thread structure's run control related fields, not just these.
692 Initialized to avoid "may be used uninitialized" warnings from gcc. */
693 struct breakpoint
*step_resume_breakpoint
= NULL
;
694 struct breakpoint
*exception_resume_breakpoint
= NULL
;
695 CORE_ADDR step_range_start
= 0;
696 CORE_ADDR step_range_end
= 0;
697 int current_line
= 0;
698 symtab
*current_symtab
= NULL
;
699 struct frame_id step_frame_id
= { 0 };
700 struct thread_fsm
*thread_fsm
= NULL
;
704 process_stratum_target
*wait_target
;
706 struct target_waitstatus wait_status
;
708 /* Get the last target status returned by target_wait(). */
709 get_last_target_status (&wait_target
, &wait_ptid
, &wait_status
);
711 /* If not stopped at a fork event, then there's nothing else to
713 if (wait_status
.kind
!= TARGET_WAITKIND_FORKED
714 && wait_status
.kind
!= TARGET_WAITKIND_VFORKED
)
717 /* Check if we switched over from WAIT_PTID, since the event was
719 if (wait_ptid
!= minus_one_ptid
720 && (current_inferior ()->process_target () != wait_target
721 || inferior_ptid
!= wait_ptid
))
723 /* We did. Switch back to WAIT_PTID thread, to tell the
724 target to follow it (in either direction). We'll
725 afterwards refuse to resume, and inform the user what
727 thread_info
*wait_thread
= find_thread_ptid (wait_target
, wait_ptid
);
728 switch_to_thread (wait_thread
);
729 should_resume
= false;
733 tp
= inferior_thread ();
735 /* If there were any forks/vforks that were caught and are now to be
736 followed, then do so now. */
737 switch (tp
->pending_follow
.kind
)
739 case TARGET_WAITKIND_FORKED
:
740 case TARGET_WAITKIND_VFORKED
:
742 ptid_t parent
, child
;
744 /* If the user did a next/step, etc, over a fork call,
745 preserve the stepping state in the fork child. */
746 if (follow_child
&& should_resume
)
748 step_resume_breakpoint
= clone_momentary_breakpoint
749 (tp
->control
.step_resume_breakpoint
);
750 step_range_start
= tp
->control
.step_range_start
;
751 step_range_end
= tp
->control
.step_range_end
;
752 current_line
= tp
->current_line
;
753 current_symtab
= tp
->current_symtab
;
754 step_frame_id
= tp
->control
.step_frame_id
;
755 exception_resume_breakpoint
756 = clone_momentary_breakpoint (tp
->control
.exception_resume_breakpoint
);
757 thread_fsm
= tp
->thread_fsm
;
759 /* For now, delete the parent's sr breakpoint, otherwise,
760 parent/child sr breakpoints are considered duplicates,
761 and the child version will not be installed. Remove
762 this when the breakpoints module becomes aware of
763 inferiors and address spaces. */
764 delete_step_resume_breakpoint (tp
);
765 tp
->control
.step_range_start
= 0;
766 tp
->control
.step_range_end
= 0;
767 tp
->control
.step_frame_id
= null_frame_id
;
768 delete_exception_resume_breakpoint (tp
);
769 tp
->thread_fsm
= NULL
;
772 parent
= inferior_ptid
;
773 child
= tp
->pending_follow
.value
.related_pid
;
775 process_stratum_target
*parent_targ
= tp
->inf
->process_target ();
776 /* Set up inferior(s) as specified by the caller, and tell the
777 target to do whatever is necessary to follow either parent
779 if (follow_fork_inferior (follow_child
, detach_fork
))
781 /* Target refused to follow, or there's some other reason
782 we shouldn't resume. */
787 /* This pending follow fork event is now handled, one way
788 or another. The previous selected thread may be gone
789 from the lists by now, but if it is still around, need
790 to clear the pending follow request. */
791 tp
= find_thread_ptid (parent_targ
, parent
);
793 tp
->pending_follow
.kind
= TARGET_WAITKIND_SPURIOUS
;
795 /* This makes sure we don't try to apply the "Switched
796 over from WAIT_PID" logic above. */
797 nullify_last_target_wait_ptid ();
799 /* If we followed the child, switch to it... */
802 thread_info
*child_thr
= find_thread_ptid (parent_targ
, child
);
803 switch_to_thread (child_thr
);
805 /* ... and preserve the stepping state, in case the
806 user was stepping over the fork call. */
809 tp
= inferior_thread ();
810 tp
->control
.step_resume_breakpoint
811 = step_resume_breakpoint
;
812 tp
->control
.step_range_start
= step_range_start
;
813 tp
->control
.step_range_end
= step_range_end
;
814 tp
->current_line
= current_line
;
815 tp
->current_symtab
= current_symtab
;
816 tp
->control
.step_frame_id
= step_frame_id
;
817 tp
->control
.exception_resume_breakpoint
818 = exception_resume_breakpoint
;
819 tp
->thread_fsm
= thread_fsm
;
823 /* If we get here, it was because we're trying to
824 resume from a fork catchpoint, but, the user
825 has switched threads away from the thread that
826 forked. In that case, the resume command
827 issued is most likely not applicable to the
828 child, so just warn, and refuse to resume. */
829 warning (_("Not resuming: switched threads "
830 "before following fork child."));
833 /* Reset breakpoints in the child as appropriate. */
834 follow_inferior_reset_breakpoints ();
839 case TARGET_WAITKIND_SPURIOUS
:
840 /* Nothing to follow. */
843 internal_error (__FILE__
, __LINE__
,
844 "Unexpected pending_follow.kind %d\n",
845 tp
->pending_follow
.kind
);
849 return should_resume
;
853 follow_inferior_reset_breakpoints (void)
855 struct thread_info
*tp
= inferior_thread ();
857 /* Was there a step_resume breakpoint? (There was if the user
858 did a "next" at the fork() call.) If so, explicitly reset its
859 thread number. Cloned step_resume breakpoints are disabled on
860 creation, so enable it here now that it is associated with the
863 step_resumes are a form of bp that are made to be per-thread.
864 Since we created the step_resume bp when the parent process
865 was being debugged, and now are switching to the child process,
866 from the breakpoint package's viewpoint, that's a switch of
867 "threads". We must update the bp's notion of which thread
868 it is for, or it'll be ignored when it triggers. */
870 if (tp
->control
.step_resume_breakpoint
)
872 breakpoint_re_set_thread (tp
->control
.step_resume_breakpoint
);
873 tp
->control
.step_resume_breakpoint
->loc
->enabled
= 1;
876 /* Treat exception_resume breakpoints like step_resume breakpoints. */
877 if (tp
->control
.exception_resume_breakpoint
)
879 breakpoint_re_set_thread (tp
->control
.exception_resume_breakpoint
);
880 tp
->control
.exception_resume_breakpoint
->loc
->enabled
= 1;
883 /* Reinsert all breakpoints in the child. The user may have set
884 breakpoints after catching the fork, in which case those
885 were never set in the child, but only in the parent. This makes
886 sure the inserted breakpoints match the breakpoint list. */
888 breakpoint_re_set ();
889 insert_breakpoints ();
892 /* The child has exited or execed: resume threads of the parent the
893 user wanted to be executing. */
896 proceed_after_vfork_done (struct thread_info
*thread
,
899 int pid
= * (int *) arg
;
901 if (thread
->ptid
.pid () == pid
902 && thread
->state
== THREAD_RUNNING
903 && !thread
->executing
904 && !thread
->stop_requested
905 && thread
->suspend
.stop_signal
== GDB_SIGNAL_0
)
908 fprintf_unfiltered (gdb_stdlog
,
909 "infrun: resuming vfork parent thread %s\n",
910 target_pid_to_str (thread
->ptid
).c_str ());
912 switch_to_thread (thread
);
913 clear_proceed_status (0);
914 proceed ((CORE_ADDR
) -1, GDB_SIGNAL_DEFAULT
);
920 /* Called whenever we notice an exec or exit event, to handle
921 detaching or resuming a vfork parent. */
924 handle_vfork_child_exec_or_exit (int exec
)
926 struct inferior
*inf
= current_inferior ();
928 if (inf
->vfork_parent
)
930 int resume_parent
= -1;
932 /* This exec or exit marks the end of the shared memory region
933 between the parent and the child. Break the bonds. */
934 inferior
*vfork_parent
= inf
->vfork_parent
;
935 inf
->vfork_parent
->vfork_child
= NULL
;
936 inf
->vfork_parent
= NULL
;
938 /* If the user wanted to detach from the parent, now is the
940 if (vfork_parent
->pending_detach
)
942 struct program_space
*pspace
;
943 struct address_space
*aspace
;
945 /* follow-fork child, detach-on-fork on. */
947 vfork_parent
->pending_detach
= 0;
949 scoped_restore_current_pspace_and_thread restore_thread
;
951 /* We're letting loose of the parent. */
952 thread_info
*tp
= any_live_thread_of_inferior (vfork_parent
);
953 switch_to_thread (tp
);
955 /* We're about to detach from the parent, which implicitly
956 removes breakpoints from its address space. There's a
957 catch here: we want to reuse the spaces for the child,
958 but, parent/child are still sharing the pspace at this
959 point, although the exec in reality makes the kernel give
960 the child a fresh set of new pages. The problem here is
961 that the breakpoints module being unaware of this, would
962 likely chose the child process to write to the parent
963 address space. Swapping the child temporarily away from
964 the spaces has the desired effect. Yes, this is "sort
967 pspace
= inf
->pspace
;
968 aspace
= inf
->aspace
;
972 if (print_inferior_events
)
975 = target_pid_to_str (ptid_t (vfork_parent
->pid
));
977 target_terminal::ours_for_output ();
981 fprintf_filtered (gdb_stdlog
,
982 _("[Detaching vfork parent %s "
983 "after child exec]\n"), pidstr
.c_str ());
987 fprintf_filtered (gdb_stdlog
,
988 _("[Detaching vfork parent %s "
989 "after child exit]\n"), pidstr
.c_str ());
993 target_detach (vfork_parent
, 0);
996 inf
->pspace
= pspace
;
997 inf
->aspace
= aspace
;
1001 /* We're staying attached to the parent, so, really give the
1002 child a new address space. */
1003 inf
->pspace
= new program_space (maybe_new_address_space ());
1004 inf
->aspace
= inf
->pspace
->aspace
;
1006 set_current_program_space (inf
->pspace
);
1008 resume_parent
= vfork_parent
->pid
;
1012 /* If this is a vfork child exiting, then the pspace and
1013 aspaces were shared with the parent. Since we're
1014 reporting the process exit, we'll be mourning all that is
1015 found in the address space, and switching to null_ptid,
1016 preparing to start a new inferior. But, since we don't
1017 want to clobber the parent's address/program spaces, we
1018 go ahead and create a new one for this exiting
1021 /* Switch to no-thread while running clone_program_space, so
1022 that clone_program_space doesn't want to read the
1023 selected frame of a dead process. */
1024 scoped_restore_current_thread restore_thread
;
1025 switch_to_no_thread ();
1027 inf
->pspace
= new program_space (maybe_new_address_space ());
1028 inf
->aspace
= inf
->pspace
->aspace
;
1029 set_current_program_space (inf
->pspace
);
1031 inf
->symfile_flags
= SYMFILE_NO_READ
;
1032 clone_program_space (inf
->pspace
, vfork_parent
->pspace
);
1034 resume_parent
= vfork_parent
->pid
;
1037 gdb_assert (current_program_space
== inf
->pspace
);
1039 if (non_stop
&& resume_parent
!= -1)
1041 /* If the user wanted the parent to be running, let it go
1043 scoped_restore_current_thread restore_thread
;
1046 fprintf_unfiltered (gdb_stdlog
,
1047 "infrun: resuming vfork parent process %d\n",
1050 iterate_over_threads (proceed_after_vfork_done
, &resume_parent
);
1055 /* Enum strings for "set|show follow-exec-mode". */
1057 static const char follow_exec_mode_new
[] = "new";
1058 static const char follow_exec_mode_same
[] = "same";
1059 static const char *const follow_exec_mode_names
[] =
1061 follow_exec_mode_new
,
1062 follow_exec_mode_same
,
1066 static const char *follow_exec_mode_string
= follow_exec_mode_same
;
1068 show_follow_exec_mode_string (struct ui_file
*file
, int from_tty
,
1069 struct cmd_list_element
*c
, const char *value
)
1071 fprintf_filtered (file
, _("Follow exec mode is \"%s\".\n"), value
);
1074 /* EXEC_FILE_TARGET is assumed to be non-NULL. */
1077 follow_exec (ptid_t ptid
, const char *exec_file_target
)
1079 struct inferior
*inf
= current_inferior ();
1080 int pid
= ptid
.pid ();
1081 ptid_t process_ptid
;
1083 /* Switch terminal for any messages produced e.g. by
1084 breakpoint_re_set. */
1085 target_terminal::ours_for_output ();
1087 /* This is an exec event that we actually wish to pay attention to.
1088 Refresh our symbol table to the newly exec'd program, remove any
1089 momentary bp's, etc.
1091 If there are breakpoints, they aren't really inserted now,
1092 since the exec() transformed our inferior into a fresh set
1095 We want to preserve symbolic breakpoints on the list, since
1096 we have hopes that they can be reset after the new a.out's
1097 symbol table is read.
1099 However, any "raw" breakpoints must be removed from the list
1100 (e.g., the solib bp's), since their address is probably invalid
1103 And, we DON'T want to call delete_breakpoints() here, since
1104 that may write the bp's "shadow contents" (the instruction
1105 value that was overwritten with a TRAP instruction). Since
1106 we now have a new a.out, those shadow contents aren't valid. */
1108 mark_breakpoints_out ();
1110 /* The target reports the exec event to the main thread, even if
1111 some other thread does the exec, and even if the main thread was
1112 stopped or already gone. We may still have non-leader threads of
1113 the process on our list. E.g., on targets that don't have thread
1114 exit events (like remote); or on native Linux in non-stop mode if
1115 there were only two threads in the inferior and the non-leader
1116 one is the one that execs (and nothing forces an update of the
1117 thread list up to here). When debugging remotely, it's best to
1118 avoid extra traffic, when possible, so avoid syncing the thread
1119 list with the target, and instead go ahead and delete all threads
1120 of the process but one that reported the event. Note this must
1121 be done before calling update_breakpoints_after_exec, as
1122 otherwise clearing the threads' resources would reference stale
1123 thread breakpoints -- it may have been one of these threads that
1124 stepped across the exec. We could just clear their stepping
1125 states, but as long as we're iterating, might as well delete
1126 them. Deleting them now rather than at the next user-visible
1127 stop provides a nicer sequence of events for user and MI
1129 for (thread_info
*th
: all_threads_safe ())
1130 if (th
->ptid
.pid () == pid
&& th
->ptid
!= ptid
)
1133 /* We also need to clear any left over stale state for the
1134 leader/event thread. E.g., if there was any step-resume
1135 breakpoint or similar, it's gone now. We cannot truly
1136 step-to-next statement through an exec(). */
1137 thread_info
*th
= inferior_thread ();
1138 th
->control
.step_resume_breakpoint
= NULL
;
1139 th
->control
.exception_resume_breakpoint
= NULL
;
1140 th
->control
.single_step_breakpoints
= NULL
;
1141 th
->control
.step_range_start
= 0;
1142 th
->control
.step_range_end
= 0;
1144 /* The user may have had the main thread held stopped in the
1145 previous image (e.g., schedlock on, or non-stop). Release
1147 th
->stop_requested
= 0;
1149 update_breakpoints_after_exec ();
1151 /* What is this a.out's name? */
1152 process_ptid
= ptid_t (pid
);
1153 printf_unfiltered (_("%s is executing new program: %s\n"),
1154 target_pid_to_str (process_ptid
).c_str (),
1157 /* We've followed the inferior through an exec. Therefore, the
1158 inferior has essentially been killed & reborn. */
1160 breakpoint_init_inferior (inf_execd
);
1162 gdb::unique_xmalloc_ptr
<char> exec_file_host
1163 = exec_file_find (exec_file_target
, NULL
);
1165 /* If we were unable to map the executable target pathname onto a host
1166 pathname, tell the user that. Otherwise GDB's subsequent behavior
1167 is confusing. Maybe it would even be better to stop at this point
1168 so that the user can specify a file manually before continuing. */
1169 if (exec_file_host
== NULL
)
1170 warning (_("Could not load symbols for executable %s.\n"
1171 "Do you need \"set sysroot\"?"),
1174 /* Reset the shared library package. This ensures that we get a
1175 shlib event when the child reaches "_start", at which point the
1176 dld will have had a chance to initialize the child. */
1177 /* Also, loading a symbol file below may trigger symbol lookups, and
1178 we don't want those to be satisfied by the libraries of the
1179 previous incarnation of this process. */
1180 no_shared_libraries (NULL
, 0);
1182 if (follow_exec_mode_string
== follow_exec_mode_new
)
1184 /* The user wants to keep the old inferior and program spaces
1185 around. Create a new fresh one, and switch to it. */
1187 /* Do exit processing for the original inferior before setting the new
1188 inferior's pid. Having two inferiors with the same pid would confuse
1189 find_inferior_p(t)id. Transfer the terminal state and info from the
1190 old to the new inferior. */
1191 inf
= add_inferior_with_spaces ();
1192 swap_terminal_info (inf
, current_inferior ());
1193 exit_inferior_silent (current_inferior ());
1196 target_follow_exec (inf
, exec_file_target
);
1198 inferior
*org_inferior
= current_inferior ();
1199 switch_to_inferior_no_thread (inf
);
1200 push_target (org_inferior
->process_target ());
1201 thread_info
*thr
= add_thread (inf
->process_target (), ptid
);
1202 switch_to_thread (thr
);
1206 /* The old description may no longer be fit for the new image.
1207 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1208 old description; we'll read a new one below. No need to do
1209 this on "follow-exec-mode new", as the old inferior stays
1210 around (its description is later cleared/refetched on
1212 target_clear_description ();
1215 gdb_assert (current_program_space
== inf
->pspace
);
1217 /* Attempt to open the exec file. SYMFILE_DEFER_BP_RESET is used
1218 because the proper displacement for a PIE (Position Independent
1219 Executable) main symbol file will only be computed by
1220 solib_create_inferior_hook below. breakpoint_re_set would fail
1221 to insert the breakpoints with the zero displacement. */
1222 try_open_exec_file (exec_file_host
.get (), inf
, SYMFILE_DEFER_BP_RESET
);
1224 /* If the target can specify a description, read it. Must do this
1225 after flipping to the new executable (because the target supplied
1226 description must be compatible with the executable's
1227 architecture, and the old executable may e.g., be 32-bit, while
1228 the new one 64-bit), and before anything involving memory or
1230 target_find_description ();
1232 solib_create_inferior_hook (0);
1234 jit_inferior_created_hook ();
1236 breakpoint_re_set ();
1238 /* Reinsert all breakpoints. (Those which were symbolic have
1239 been reset to the proper address in the new a.out, thanks
1240 to symbol_file_command...). */
1241 insert_breakpoints ();
1243 /* The next resume of this inferior should bring it to the shlib
1244 startup breakpoints. (If the user had also set bp's on
1245 "main" from the old (parent) process, then they'll auto-
1246 matically get reset there in the new process.). */
1249 /* The queue of threads that need to do a step-over operation to get
1250 past e.g., a breakpoint. What technique is used to step over the
1251 breakpoint/watchpoint does not matter -- all threads end up in the
1252 same queue, to maintain rough temporal order of execution, in order
1253 to avoid starvation, otherwise, we could e.g., find ourselves
1254 constantly stepping the same couple threads past their breakpoints
1255 over and over, if the single-step finish fast enough. */
1256 struct thread_info
*step_over_queue_head
;
1258 /* Bit flags indicating what the thread needs to step over. */
1260 enum step_over_what_flag
1262 /* Step over a breakpoint. */
1263 STEP_OVER_BREAKPOINT
= 1,
1265 /* Step past a non-continuable watchpoint, in order to let the
1266 instruction execute so we can evaluate the watchpoint
1268 STEP_OVER_WATCHPOINT
= 2
1270 DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag
, step_over_what
);
1272 /* Info about an instruction that is being stepped over. */
1274 struct step_over_info
1276 /* If we're stepping past a breakpoint, this is the address space
1277 and address of the instruction the breakpoint is set at. We'll
1278 skip inserting all breakpoints here. Valid iff ASPACE is
1280 const address_space
*aspace
;
1283 /* The instruction being stepped over triggers a nonsteppable
1284 watchpoint. If true, we'll skip inserting watchpoints. */
1285 int nonsteppable_watchpoint_p
;
1287 /* The thread's global number. */
1291 /* The step-over info of the location that is being stepped over.
1293 Note that with async/breakpoint always-inserted mode, a user might
1294 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1295 being stepped over. As setting a new breakpoint inserts all
1296 breakpoints, we need to make sure the breakpoint being stepped over
1297 isn't inserted then. We do that by only clearing the step-over
1298 info when the step-over is actually finished (or aborted).
1300 Presently GDB can only step over one breakpoint at any given time.
1301 Given threads that can't run code in the same address space as the
1302 breakpoint's can't really miss the breakpoint, GDB could be taught
1303 to step-over at most one breakpoint per address space (so this info
1304 could move to the address space object if/when GDB is extended).
1305 The set of breakpoints being stepped over will normally be much
1306 smaller than the set of all breakpoints, so a flag in the
1307 breakpoint location structure would be wasteful. A separate list
1308 also saves complexity and run-time, as otherwise we'd have to go
1309 through all breakpoint locations clearing their flag whenever we
1310 start a new sequence. Similar considerations weigh against storing
1311 this info in the thread object. Plus, not all step overs actually
1312 have breakpoint locations -- e.g., stepping past a single-step
1313 breakpoint, or stepping to complete a non-continuable
1315 static struct step_over_info step_over_info
;
1317 /* Record the address of the breakpoint/instruction we're currently
1319 N.B. We record the aspace and address now, instead of say just the thread,
1320 because when we need the info later the thread may be running. */
1323 set_step_over_info (const address_space
*aspace
, CORE_ADDR address
,
1324 int nonsteppable_watchpoint_p
,
1327 step_over_info
.aspace
= aspace
;
1328 step_over_info
.address
= address
;
1329 step_over_info
.nonsteppable_watchpoint_p
= nonsteppable_watchpoint_p
;
1330 step_over_info
.thread
= thread
;
1333 /* Called when we're not longer stepping over a breakpoint / an
1334 instruction, so all breakpoints are free to be (re)inserted. */
1337 clear_step_over_info (void)
1340 fprintf_unfiltered (gdb_stdlog
,
1341 "infrun: clear_step_over_info\n");
1342 step_over_info
.aspace
= NULL
;
1343 step_over_info
.address
= 0;
1344 step_over_info
.nonsteppable_watchpoint_p
= 0;
1345 step_over_info
.thread
= -1;
1351 stepping_past_instruction_at (struct address_space
*aspace
,
1354 return (step_over_info
.aspace
!= NULL
1355 && breakpoint_address_match (aspace
, address
,
1356 step_over_info
.aspace
,
1357 step_over_info
.address
));
1363 thread_is_stepping_over_breakpoint (int thread
)
1365 return (step_over_info
.thread
!= -1
1366 && thread
== step_over_info
.thread
);
1372 stepping_past_nonsteppable_watchpoint (void)
1374 return step_over_info
.nonsteppable_watchpoint_p
;
1377 /* Returns true if step-over info is valid. */
1380 step_over_info_valid_p (void)
1382 return (step_over_info
.aspace
!= NULL
1383 || stepping_past_nonsteppable_watchpoint ());
1387 /* Displaced stepping. */
1389 /* In non-stop debugging mode, we must take special care to manage
1390 breakpoints properly; in particular, the traditional strategy for
1391 stepping a thread past a breakpoint it has hit is unsuitable.
1392 'Displaced stepping' is a tactic for stepping one thread past a
1393 breakpoint it has hit while ensuring that other threads running
1394 concurrently will hit the breakpoint as they should.
1396 The traditional way to step a thread T off a breakpoint in a
1397 multi-threaded program in all-stop mode is as follows:
1399 a0) Initially, all threads are stopped, and breakpoints are not
1401 a1) We single-step T, leaving breakpoints uninserted.
1402 a2) We insert breakpoints, and resume all threads.
1404 In non-stop debugging, however, this strategy is unsuitable: we
1405 don't want to have to stop all threads in the system in order to
1406 continue or step T past a breakpoint. Instead, we use displaced
1409 n0) Initially, T is stopped, other threads are running, and
1410 breakpoints are inserted.
1411 n1) We copy the instruction "under" the breakpoint to a separate
1412 location, outside the main code stream, making any adjustments
1413 to the instruction, register, and memory state as directed by
1415 n2) We single-step T over the instruction at its new location.
1416 n3) We adjust the resulting register and memory state as directed
1417 by T's architecture. This includes resetting T's PC to point
1418 back into the main instruction stream.
1421 This approach depends on the following gdbarch methods:
1423 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1424 indicate where to copy the instruction, and how much space must
1425 be reserved there. We use these in step n1.
1427 - gdbarch_displaced_step_copy_insn copies a instruction to a new
1428 address, and makes any necessary adjustments to the instruction,
1429 register contents, and memory. We use this in step n1.
1431 - gdbarch_displaced_step_fixup adjusts registers and memory after
1432 we have successfully single-stepped the instruction, to yield the
1433 same effect the instruction would have had if we had executed it
1434 at its original address. We use this in step n3.
1436 The gdbarch_displaced_step_copy_insn and
1437 gdbarch_displaced_step_fixup functions must be written so that
1438 copying an instruction with gdbarch_displaced_step_copy_insn,
1439 single-stepping across the copied instruction, and then applying
1440 gdbarch_displaced_insn_fixup should have the same effects on the
1441 thread's memory and registers as stepping the instruction in place
1442 would have. Exactly which responsibilities fall to the copy and
1443 which fall to the fixup is up to the author of those functions.
1445 See the comments in gdbarch.sh for details.
1447 Note that displaced stepping and software single-step cannot
1448 currently be used in combination, although with some care I think
1449 they could be made to. Software single-step works by placing
1450 breakpoints on all possible subsequent instructions; if the
1451 displaced instruction is a PC-relative jump, those breakpoints
1452 could fall in very strange places --- on pages that aren't
1453 executable, or at addresses that are not proper instruction
1454 boundaries. (We do generally let other threads run while we wait
1455 to hit the software single-step breakpoint, and they might
1456 encounter such a corrupted instruction.) One way to work around
1457 this would be to have gdbarch_displaced_step_copy_insn fully
1458 simulate the effect of PC-relative instructions (and return NULL)
1459 on architectures that use software single-stepping.
1461 In non-stop mode, we can have independent and simultaneous step
1462 requests, so more than one thread may need to simultaneously step
1463 over a breakpoint. The current implementation assumes there is
1464 only one scratch space per process. In this case, we have to
1465 serialize access to the scratch space. If thread A wants to step
1466 over a breakpoint, but we are currently waiting for some other
1467 thread to complete a displaced step, we leave thread A stopped and
1468 place it in the displaced_step_request_queue. Whenever a displaced
1469 step finishes, we pick the next thread in the queue and start a new
1470 displaced step operation on it. See displaced_step_prepare and
1471 displaced_step_fixup for details. */
1473 /* Default destructor for displaced_step_closure. */
1475 displaced_step_closure::~displaced_step_closure () = default;
1477 /* Get the displaced stepping state of process PID. */
1479 static displaced_step_inferior_state
*
1480 get_displaced_stepping_state (inferior
*inf
)
1482 return &inf
->displaced_step_state
;
1485 /* Returns true if any inferior has a thread doing a displaced
1489 displaced_step_in_progress_any_inferior ()
1491 for (inferior
*i
: all_inferiors ())
1493 if (i
->displaced_step_state
.step_thread
!= nullptr)
1500 /* Return true if thread represented by PTID is doing a displaced
1504 displaced_step_in_progress_thread (thread_info
*thread
)
1506 gdb_assert (thread
!= NULL
);
1508 return get_displaced_stepping_state (thread
->inf
)->step_thread
== thread
;
1511 /* Return true if process PID has a thread doing a displaced step. */
1514 displaced_step_in_progress (inferior
*inf
)
1516 return get_displaced_stepping_state (inf
)->step_thread
!= nullptr;
1519 /* If inferior is in displaced stepping, and ADDR equals to starting address
1520 of copy area, return corresponding displaced_step_closure. Otherwise,
1523 struct displaced_step_closure
*
1524 get_displaced_step_closure_by_addr (CORE_ADDR addr
)
1526 displaced_step_inferior_state
*displaced
1527 = get_displaced_stepping_state (current_inferior ());
1529 /* If checking the mode of displaced instruction in copy area. */
1530 if (displaced
->step_thread
!= nullptr
1531 && displaced
->step_copy
== addr
)
1532 return displaced
->step_closure
.get ();
1538 infrun_inferior_exit (struct inferior
*inf
)
1540 inf
->displaced_step_state
.reset ();
1543 /* If ON, and the architecture supports it, GDB will use displaced
1544 stepping to step over breakpoints. If OFF, or if the architecture
1545 doesn't support it, GDB will instead use the traditional
1546 hold-and-step approach. If AUTO (which is the default), GDB will
1547 decide which technique to use to step over breakpoints depending on
1548 whether the target works in a non-stop way (see use_displaced_stepping). */
1550 static enum auto_boolean can_use_displaced_stepping
= AUTO_BOOLEAN_AUTO
;
1553 show_can_use_displaced_stepping (struct ui_file
*file
, int from_tty
,
1554 struct cmd_list_element
*c
,
1557 if (can_use_displaced_stepping
== AUTO_BOOLEAN_AUTO
)
1558 fprintf_filtered (file
,
1559 _("Debugger's willingness to use displaced stepping "
1560 "to step over breakpoints is %s (currently %s).\n"),
1561 value
, target_is_non_stop_p () ? "on" : "off");
1563 fprintf_filtered (file
,
1564 _("Debugger's willingness to use displaced stepping "
1565 "to step over breakpoints is %s.\n"), value
);
1568 /* Return true if the gdbarch implements the required methods to use
1569 displaced stepping. */
1572 gdbarch_supports_displaced_stepping (gdbarch
*arch
)
1574 /* Only check for the presence of step_copy_insn. Other required methods
1575 are checked by the gdbarch validation. */
1576 return gdbarch_displaced_step_copy_insn_p (arch
);
1579 /* Return non-zero if displaced stepping can/should be used to step
1580 over breakpoints of thread TP. */
1583 use_displaced_stepping (thread_info
*tp
)
1585 /* If the user disabled it explicitly, don't use displaced stepping. */
1586 if (can_use_displaced_stepping
== AUTO_BOOLEAN_FALSE
)
1589 /* If "auto", only use displaced stepping if the target operates in a non-stop
1591 if (can_use_displaced_stepping
== AUTO_BOOLEAN_AUTO
1592 && !target_is_non_stop_p ())
1595 gdbarch
*gdbarch
= get_thread_regcache (tp
)->arch ();
1597 /* If the architecture doesn't implement displaced stepping, don't use
1599 if (!gdbarch_supports_displaced_stepping (gdbarch
))
1602 /* If recording, don't use displaced stepping. */
1603 if (find_record_target () != nullptr)
1606 displaced_step_inferior_state
*displaced_state
1607 = get_displaced_stepping_state (tp
->inf
);
1609 /* If displaced stepping failed before for this inferior, don't bother trying
1611 if (displaced_state
->failed_before
)
1617 /* Simple function wrapper around displaced_step_inferior_state::reset. */
1620 displaced_step_reset (displaced_step_inferior_state
*displaced
)
1622 displaced
->reset ();
1625 /* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1626 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1628 using displaced_step_reset_cleanup
= FORWARD_SCOPE_EXIT (displaced_step_reset
);
1630 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1632 displaced_step_dump_bytes (struct ui_file
*file
,
1633 const gdb_byte
*buf
,
1638 for (i
= 0; i
< len
; i
++)
1639 fprintf_unfiltered (file
, "%02x ", buf
[i
]);
1640 fputs_unfiltered ("\n", file
);
1643 /* Prepare to single-step, using displaced stepping.
1645 Note that we cannot use displaced stepping when we have a signal to
1646 deliver. If we have a signal to deliver and an instruction to step
1647 over, then after the step, there will be no indication from the
1648 target whether the thread entered a signal handler or ignored the
1649 signal and stepped over the instruction successfully --- both cases
1650 result in a simple SIGTRAP. In the first case we mustn't do a
1651 fixup, and in the second case we must --- but we can't tell which.
1652 Comments in the code for 'random signals' in handle_inferior_event
1653 explain how we handle this case instead.
1655 Returns 1 if preparing was successful -- this thread is going to be
1656 stepped now; 0 if displaced stepping this thread got queued; or -1
1657 if this instruction can't be displaced stepped. */
1660 displaced_step_prepare_throw (thread_info
*tp
)
1662 regcache
*regcache
= get_thread_regcache (tp
);
1663 struct gdbarch
*gdbarch
= regcache
->arch ();
1664 const address_space
*aspace
= regcache
->aspace ();
1665 CORE_ADDR original
, copy
;
1669 /* We should never reach this function if the architecture does not
1670 support displaced stepping. */
1671 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch
));
1673 /* Nor if the thread isn't meant to step over a breakpoint. */
1674 gdb_assert (tp
->control
.trap_expected
);
1676 /* Disable range stepping while executing in the scratch pad. We
1677 want a single-step even if executing the displaced instruction in
1678 the scratch buffer lands within the stepping range (e.g., a
1680 tp
->control
.may_range_step
= 0;
1682 /* We have to displaced step one thread at a time, as we only have
1683 access to a single scratch space per inferior. */
1685 displaced_step_inferior_state
*displaced
1686 = get_displaced_stepping_state (tp
->inf
);
1688 if (displaced
->step_thread
!= nullptr)
1690 /* Already waiting for a displaced step to finish. Defer this
1691 request and place in queue. */
1693 if (debug_displaced
)
1694 fprintf_unfiltered (gdb_stdlog
,
1695 "displaced: deferring step of %s\n",
1696 target_pid_to_str (tp
->ptid
).c_str ());
1698 thread_step_over_chain_enqueue (tp
);
1703 if (debug_displaced
)
1704 fprintf_unfiltered (gdb_stdlog
,
1705 "displaced: stepping %s now\n",
1706 target_pid_to_str (tp
->ptid
).c_str ());
1709 displaced_step_reset (displaced
);
1711 scoped_restore_current_thread restore_thread
;
1713 switch_to_thread (tp
);
1715 original
= regcache_read_pc (regcache
);
1717 copy
= gdbarch_displaced_step_location (gdbarch
);
1718 len
= gdbarch_max_insn_length (gdbarch
);
1720 if (breakpoint_in_range_p (aspace
, copy
, len
))
1722 /* There's a breakpoint set in the scratch pad location range
1723 (which is usually around the entry point). We'd either
1724 install it before resuming, which would overwrite/corrupt the
1725 scratch pad, or if it was already inserted, this displaced
1726 step would overwrite it. The latter is OK in the sense that
1727 we already assume that no thread is going to execute the code
1728 in the scratch pad range (after initial startup) anyway, but
1729 the former is unacceptable. Simply punt and fallback to
1730 stepping over this breakpoint in-line. */
1731 if (debug_displaced
)
1733 fprintf_unfiltered (gdb_stdlog
,
1734 "displaced: breakpoint set in scratch pad. "
1735 "Stepping over breakpoint in-line instead.\n");
1741 /* Save the original contents of the copy area. */
1742 displaced
->step_saved_copy
.resize (len
);
1743 status
= target_read_memory (copy
, displaced
->step_saved_copy
.data (), len
);
1745 throw_error (MEMORY_ERROR
,
1746 _("Error accessing memory address %s (%s) for "
1747 "displaced-stepping scratch space."),
1748 paddress (gdbarch
, copy
), safe_strerror (status
));
1749 if (debug_displaced
)
1751 fprintf_unfiltered (gdb_stdlog
, "displaced: saved %s: ",
1752 paddress (gdbarch
, copy
));
1753 displaced_step_dump_bytes (gdb_stdlog
,
1754 displaced
->step_saved_copy
.data (),
1758 displaced
->step_closure
1759 = gdbarch_displaced_step_copy_insn (gdbarch
, original
, copy
, regcache
);
1760 if (displaced
->step_closure
== NULL
)
1762 /* The architecture doesn't know how or want to displaced step
1763 this instruction or instruction sequence. Fallback to
1764 stepping over the breakpoint in-line. */
1768 /* Save the information we need to fix things up if the step
1770 displaced
->step_thread
= tp
;
1771 displaced
->step_gdbarch
= gdbarch
;
1772 displaced
->step_original
= original
;
1773 displaced
->step_copy
= copy
;
1776 displaced_step_reset_cleanup
cleanup (displaced
);
1778 /* Resume execution at the copy. */
1779 regcache_write_pc (regcache
, copy
);
1784 if (debug_displaced
)
1785 fprintf_unfiltered (gdb_stdlog
, "displaced: displaced pc to %s\n",
1786 paddress (gdbarch
, copy
));
1791 /* Wrapper for displaced_step_prepare_throw that disabled further
1792 attempts at displaced stepping if we get a memory error. */
1795 displaced_step_prepare (thread_info
*thread
)
1801 prepared
= displaced_step_prepare_throw (thread
);
1803 catch (const gdb_exception_error
&ex
)
1805 struct displaced_step_inferior_state
*displaced_state
;
1807 if (ex
.error
!= MEMORY_ERROR
1808 && ex
.error
!= NOT_SUPPORTED_ERROR
)
1813 fprintf_unfiltered (gdb_stdlog
,
1814 "infrun: disabling displaced stepping: %s\n",
1818 /* Be verbose if "set displaced-stepping" is "on", silent if
1820 if (can_use_displaced_stepping
== AUTO_BOOLEAN_TRUE
)
1822 warning (_("disabling displaced stepping: %s"),
1826 /* Disable further displaced stepping attempts. */
1828 = get_displaced_stepping_state (thread
->inf
);
1829 displaced_state
->failed_before
= 1;
1836 write_memory_ptid (ptid_t ptid
, CORE_ADDR memaddr
,
1837 const gdb_byte
*myaddr
, int len
)
1839 scoped_restore save_inferior_ptid
= make_scoped_restore (&inferior_ptid
);
1841 inferior_ptid
= ptid
;
1842 write_memory (memaddr
, myaddr
, len
);
1845 /* Restore the contents of the copy area for thread PTID. */
1848 displaced_step_restore (struct displaced_step_inferior_state
*displaced
,
1851 ULONGEST len
= gdbarch_max_insn_length (displaced
->step_gdbarch
);
1853 write_memory_ptid (ptid
, displaced
->step_copy
,
1854 displaced
->step_saved_copy
.data (), len
);
1855 if (debug_displaced
)
1856 fprintf_unfiltered (gdb_stdlog
, "displaced: restored %s %s\n",
1857 target_pid_to_str (ptid
).c_str (),
1858 paddress (displaced
->step_gdbarch
,
1859 displaced
->step_copy
));
1862 /* If we displaced stepped an instruction successfully, adjust
1863 registers and memory to yield the same effect the instruction would
1864 have had if we had executed it at its original address, and return
1865 1. If the instruction didn't complete, relocate the PC and return
1866 -1. If the thread wasn't displaced stepping, return 0. */
1869 displaced_step_fixup (thread_info
*event_thread
, enum gdb_signal signal
)
1871 struct displaced_step_inferior_state
*displaced
1872 = get_displaced_stepping_state (event_thread
->inf
);
1875 /* Was this event for the thread we displaced? */
1876 if (displaced
->step_thread
!= event_thread
)
1879 /* Fixup may need to read memory/registers. Switch to the thread
1880 that we're fixing up. Also, target_stopped_by_watchpoint checks
1881 the current thread, and displaced_step_restore performs ptid-dependent
1882 memory accesses using current_inferior() and current_top_target(). */
1883 switch_to_thread (event_thread
);
1885 displaced_step_reset_cleanup
cleanup (displaced
);
1887 displaced_step_restore (displaced
, displaced
->step_thread
->ptid
);
1889 /* Did the instruction complete successfully? */
1890 if (signal
== GDB_SIGNAL_TRAP
1891 && !(target_stopped_by_watchpoint ()
1892 && (gdbarch_have_nonsteppable_watchpoint (displaced
->step_gdbarch
)
1893 || target_have_steppable_watchpoint
)))
1895 /* Fix up the resulting state. */
1896 gdbarch_displaced_step_fixup (displaced
->step_gdbarch
,
1897 displaced
->step_closure
.get (),
1898 displaced
->step_original
,
1899 displaced
->step_copy
,
1900 get_thread_regcache (displaced
->step_thread
));
1905 /* Since the instruction didn't complete, all we can do is
1907 struct regcache
*regcache
= get_thread_regcache (event_thread
);
1908 CORE_ADDR pc
= regcache_read_pc (regcache
);
1910 pc
= displaced
->step_original
+ (pc
- displaced
->step_copy
);
1911 regcache_write_pc (regcache
, pc
);
1918 /* Data to be passed around while handling an event. This data is
1919 discarded between events. */
1920 struct execution_control_state
1922 process_stratum_target
*target
;
1924 /* The thread that got the event, if this was a thread event; NULL
1926 struct thread_info
*event_thread
;
1928 struct target_waitstatus ws
;
1929 int stop_func_filled_in
;
1930 CORE_ADDR stop_func_start
;
1931 CORE_ADDR stop_func_end
;
1932 const char *stop_func_name
;
1935 /* True if the event thread hit the single-step breakpoint of
1936 another thread. Thus the event doesn't cause a stop, the thread
1937 needs to be single-stepped past the single-step breakpoint before
1938 we can switch back to the original stepping thread. */
1939 int hit_singlestep_breakpoint
;
1942 /* Clear ECS and set it to point at TP. */
1945 reset_ecs (struct execution_control_state
*ecs
, struct thread_info
*tp
)
1947 memset (ecs
, 0, sizeof (*ecs
));
1948 ecs
->event_thread
= tp
;
1949 ecs
->ptid
= tp
->ptid
;
1952 static void keep_going_pass_signal (struct execution_control_state
*ecs
);
1953 static void prepare_to_wait (struct execution_control_state
*ecs
);
1954 static int keep_going_stepped_thread (struct thread_info
*tp
);
1955 static step_over_what
thread_still_needs_step_over (struct thread_info
*tp
);
1957 /* Are there any pending step-over requests? If so, run all we can
1958 now and return true. Otherwise, return false. */
1961 start_step_over (void)
1963 struct thread_info
*tp
, *next
;
1965 /* Don't start a new step-over if we already have an in-line
1966 step-over operation ongoing. */
1967 if (step_over_info_valid_p ())
1970 for (tp
= step_over_queue_head
; tp
!= NULL
; tp
= next
)
1972 struct execution_control_state ecss
;
1973 struct execution_control_state
*ecs
= &ecss
;
1974 step_over_what step_what
;
1975 int must_be_in_line
;
1977 gdb_assert (!tp
->stop_requested
);
1979 next
= thread_step_over_chain_next (tp
);
1981 /* If this inferior already has a displaced step in process,
1982 don't start a new one. */
1983 if (displaced_step_in_progress (tp
->inf
))
1986 step_what
= thread_still_needs_step_over (tp
);
1987 must_be_in_line
= ((step_what
& STEP_OVER_WATCHPOINT
)
1988 || ((step_what
& STEP_OVER_BREAKPOINT
)
1989 && !use_displaced_stepping (tp
)));
1991 /* We currently stop all threads of all processes to step-over
1992 in-line. If we need to start a new in-line step-over, let
1993 any pending displaced steps finish first. */
1994 if (must_be_in_line
&& displaced_step_in_progress_any_inferior ())
1997 thread_step_over_chain_remove (tp
);
1999 if (step_over_queue_head
== NULL
)
2002 fprintf_unfiltered (gdb_stdlog
,
2003 "infrun: step-over queue now empty\n");
2006 if (tp
->control
.trap_expected
2010 internal_error (__FILE__
, __LINE__
,
2011 "[%s] has inconsistent state: "
2012 "trap_expected=%d, resumed=%d, executing=%d\n",
2013 target_pid_to_str (tp
->ptid
).c_str (),
2014 tp
->control
.trap_expected
,
2020 fprintf_unfiltered (gdb_stdlog
,
2021 "infrun: resuming [%s] for step-over\n",
2022 target_pid_to_str (tp
->ptid
).c_str ());
2024 /* keep_going_pass_signal skips the step-over if the breakpoint
2025 is no longer inserted. In all-stop, we want to keep looking
2026 for a thread that needs a step-over instead of resuming TP,
2027 because we wouldn't be able to resume anything else until the
2028 target stops again. In non-stop, the resume always resumes
2029 only TP, so it's OK to let the thread resume freely. */
2030 if (!target_is_non_stop_p () && !step_what
)
2033 switch_to_thread (tp
);
2034 reset_ecs (ecs
, tp
);
2035 keep_going_pass_signal (ecs
);
2037 if (!ecs
->wait_some_more
)
2038 error (_("Command aborted."));
2040 gdb_assert (tp
->resumed
);
2042 /* If we started a new in-line step-over, we're done. */
2043 if (step_over_info_valid_p ())
2045 gdb_assert (tp
->control
.trap_expected
);
2049 if (!target_is_non_stop_p ())
2051 /* On all-stop, shouldn't have resumed unless we needed a
2053 gdb_assert (tp
->control
.trap_expected
2054 || tp
->step_after_step_resume_breakpoint
);
2056 /* With remote targets (at least), in all-stop, we can't
2057 issue any further remote commands until the program stops
2062 /* Either the thread no longer needed a step-over, or a new
2063 displaced stepping sequence started. Even in the latter
2064 case, continue looking. Maybe we can also start another
2065 displaced step on a thread of other process. */
2071 /* Update global variables holding ptids to hold NEW_PTID if they were
2072 holding OLD_PTID. */
2074 infrun_thread_ptid_changed (process_stratum_target
*target
,
2075 ptid_t old_ptid
, ptid_t new_ptid
)
2077 if (inferior_ptid
== old_ptid
2078 && current_inferior ()->process_target () == target
)
2079 inferior_ptid
= new_ptid
;
2084 static const char schedlock_off
[] = "off";
2085 static const char schedlock_on
[] = "on";
2086 static const char schedlock_step
[] = "step";
2087 static const char schedlock_replay
[] = "replay";
2088 static const char *const scheduler_enums
[] = {
2095 static const char *scheduler_mode
= schedlock_replay
;
2097 show_scheduler_mode (struct ui_file
*file
, int from_tty
,
2098 struct cmd_list_element
*c
, const char *value
)
2100 fprintf_filtered (file
,
2101 _("Mode for locking scheduler "
2102 "during execution is \"%s\".\n"),
2107 set_schedlock_func (const char *args
, int from_tty
, struct cmd_list_element
*c
)
2109 if (!target_can_lock_scheduler
)
2111 scheduler_mode
= schedlock_off
;
2112 error (_("Target '%s' cannot support this command."), target_shortname
);
2116 /* True if execution commands resume all threads of all processes by
2117 default; otherwise, resume only threads of the current inferior
2119 bool sched_multi
= false;
2121 /* Try to setup for software single stepping over the specified location.
2122 Return 1 if target_resume() should use hardware single step.
2124 GDBARCH the current gdbarch.
2125 PC the location to step over. */
2128 maybe_software_singlestep (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
2132 if (execution_direction
== EXEC_FORWARD
2133 && gdbarch_software_single_step_p (gdbarch
))
2134 hw_step
= !insert_single_step_breakpoints (gdbarch
);
2142 user_visible_resume_ptid (int step
)
2148 /* With non-stop mode on, threads are always handled
2150 resume_ptid
= inferior_ptid
;
2152 else if ((scheduler_mode
== schedlock_on
)
2153 || (scheduler_mode
== schedlock_step
&& step
))
2155 /* User-settable 'scheduler' mode requires solo thread
2157 resume_ptid
= inferior_ptid
;
2159 else if ((scheduler_mode
== schedlock_replay
)
2160 && target_record_will_replay (minus_one_ptid
, execution_direction
))
2162 /* User-settable 'scheduler' mode requires solo thread resume in replay
2164 resume_ptid
= inferior_ptid
;
2166 else if (!sched_multi
&& target_supports_multi_process ())
2168 /* Resume all threads of the current process (and none of other
2170 resume_ptid
= ptid_t (inferior_ptid
.pid ());
2174 /* Resume all threads of all processes. */
2175 resume_ptid
= RESUME_ALL
;
2183 process_stratum_target
*
2184 user_visible_resume_target (ptid_t resume_ptid
)
2186 return (resume_ptid
== minus_one_ptid
&& sched_multi
2188 : current_inferior ()->process_target ());
2191 /* Return a ptid representing the set of threads that we will resume,
2192 in the perspective of the target, assuming run control handling
2193 does not require leaving some threads stopped (e.g., stepping past
2194 breakpoint). USER_STEP indicates whether we're about to start the
2195 target for a stepping command. */
2198 internal_resume_ptid (int user_step
)
2200 /* In non-stop, we always control threads individually. Note that
2201 the target may always work in non-stop mode even with "set
2202 non-stop off", in which case user_visible_resume_ptid could
2203 return a wildcard ptid. */
2204 if (target_is_non_stop_p ())
2205 return inferior_ptid
;
2207 return user_visible_resume_ptid (user_step
);
2210 /* Wrapper for target_resume, that handles infrun-specific
2214 do_target_resume (ptid_t resume_ptid
, int step
, enum gdb_signal sig
)
2216 struct thread_info
*tp
= inferior_thread ();
2218 gdb_assert (!tp
->stop_requested
);
2220 /* Install inferior's terminal modes. */
2221 target_terminal::inferior ();
2223 /* Avoid confusing the next resume, if the next stop/resume
2224 happens to apply to another thread. */
2225 tp
->suspend
.stop_signal
= GDB_SIGNAL_0
;
2227 /* Advise target which signals may be handled silently.
2229 If we have removed breakpoints because we are stepping over one
2230 in-line (in any thread), we need to receive all signals to avoid
2231 accidentally skipping a breakpoint during execution of a signal
2234 Likewise if we're displaced stepping, otherwise a trap for a
2235 breakpoint in a signal handler might be confused with the
2236 displaced step finishing. We don't make the displaced_step_fixup
2237 step distinguish the cases instead, because:
2239 - a backtrace while stopped in the signal handler would show the
2240 scratch pad as frame older than the signal handler, instead of
2241 the real mainline code.
2243 - when the thread is later resumed, the signal handler would
2244 return to the scratch pad area, which would no longer be
2246 if (step_over_info_valid_p ()
2247 || displaced_step_in_progress (tp
->inf
))
2248 target_pass_signals ({});
2250 target_pass_signals (signal_pass
);
2252 target_resume (resume_ptid
, step
, sig
);
2254 target_commit_resume ();
2256 if (target_can_async_p ())
2260 /* Resume the inferior. SIG is the signal to give the inferior
2261 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2262 call 'resume', which handles exceptions. */
2265 resume_1 (enum gdb_signal sig
)
2267 struct regcache
*regcache
= get_current_regcache ();
2268 struct gdbarch
*gdbarch
= regcache
->arch ();
2269 struct thread_info
*tp
= inferior_thread ();
2270 const address_space
*aspace
= regcache
->aspace ();
2272 /* This represents the user's step vs continue request. When
2273 deciding whether "set scheduler-locking step" applies, it's the
2274 user's intention that counts. */
2275 const int user_step
= tp
->control
.stepping_command
;
2276 /* This represents what we'll actually request the target to do.
2277 This can decay from a step to a continue, if e.g., we need to
2278 implement single-stepping with breakpoints (software
2282 gdb_assert (!tp
->stop_requested
);
2283 gdb_assert (!thread_is_in_step_over_chain (tp
));
2285 if (tp
->suspend
.waitstatus_pending_p
)
2290 = target_waitstatus_to_string (&tp
->suspend
.waitstatus
);
2292 fprintf_unfiltered (gdb_stdlog
,
2293 "infrun: resume: thread %s has pending wait "
2294 "status %s (currently_stepping=%d).\n",
2295 target_pid_to_str (tp
->ptid
).c_str (),
2297 currently_stepping (tp
));
2300 tp
->inf
->process_target ()->threads_executing
= true;
2303 /* FIXME: What should we do if we are supposed to resume this
2304 thread with a signal? Maybe we should maintain a queue of
2305 pending signals to deliver. */
2306 if (sig
!= GDB_SIGNAL_0
)
2308 warning (_("Couldn't deliver signal %s to %s."),
2309 gdb_signal_to_name (sig
),
2310 target_pid_to_str (tp
->ptid
).c_str ());
2313 tp
->suspend
.stop_signal
= GDB_SIGNAL_0
;
2315 if (target_can_async_p ())
2318 /* Tell the event loop we have an event to process. */
2319 mark_async_event_handler (infrun_async_inferior_event_token
);
2324 tp
->stepped_breakpoint
= 0;
2326 /* Depends on stepped_breakpoint. */
2327 step
= currently_stepping (tp
);
2329 if (current_inferior ()->waiting_for_vfork_done
)
2331 /* Don't try to single-step a vfork parent that is waiting for
2332 the child to get out of the shared memory region (by exec'ing
2333 or exiting). This is particularly important on software
2334 single-step archs, as the child process would trip on the
2335 software single step breakpoint inserted for the parent
2336 process. Since the parent will not actually execute any
2337 instruction until the child is out of the shared region (such
2338 are vfork's semantics), it is safe to simply continue it.
2339 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2340 the parent, and tell it to `keep_going', which automatically
2341 re-sets it stepping. */
2343 fprintf_unfiltered (gdb_stdlog
,
2344 "infrun: resume : clear step\n");
2348 CORE_ADDR pc
= regcache_read_pc (regcache
);
2351 fprintf_unfiltered (gdb_stdlog
,
2352 "infrun: resume (step=%d, signal=%s), "
2353 "trap_expected=%d, current thread [%s] at %s\n",
2354 step
, gdb_signal_to_symbol_string (sig
),
2355 tp
->control
.trap_expected
,
2356 target_pid_to_str (inferior_ptid
).c_str (),
2357 paddress (gdbarch
, pc
));
2359 /* Normally, by the time we reach `resume', the breakpoints are either
2360 removed or inserted, as appropriate. The exception is if we're sitting
2361 at a permanent breakpoint; we need to step over it, but permanent
2362 breakpoints can't be removed. So we have to test for it here. */
2363 if (breakpoint_here_p (aspace
, pc
) == permanent_breakpoint_here
)
2365 if (sig
!= GDB_SIGNAL_0
)
2367 /* We have a signal to pass to the inferior. The resume
2368 may, or may not take us to the signal handler. If this
2369 is a step, we'll need to stop in the signal handler, if
2370 there's one, (if the target supports stepping into
2371 handlers), or in the next mainline instruction, if
2372 there's no handler. If this is a continue, we need to be
2373 sure to run the handler with all breakpoints inserted.
2374 In all cases, set a breakpoint at the current address
2375 (where the handler returns to), and once that breakpoint
2376 is hit, resume skipping the permanent breakpoint. If
2377 that breakpoint isn't hit, then we've stepped into the
2378 signal handler (or hit some other event). We'll delete
2379 the step-resume breakpoint then. */
2382 fprintf_unfiltered (gdb_stdlog
,
2383 "infrun: resume: skipping permanent breakpoint, "
2384 "deliver signal first\n");
2386 clear_step_over_info ();
2387 tp
->control
.trap_expected
= 0;
2389 if (tp
->control
.step_resume_breakpoint
== NULL
)
2391 /* Set a "high-priority" step-resume, as we don't want
2392 user breakpoints at PC to trigger (again) when this
2394 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2395 gdb_assert (tp
->control
.step_resume_breakpoint
->loc
->permanent
);
2397 tp
->step_after_step_resume_breakpoint
= step
;
2400 insert_breakpoints ();
2404 /* There's no signal to pass, we can go ahead and skip the
2405 permanent breakpoint manually. */
2407 fprintf_unfiltered (gdb_stdlog
,
2408 "infrun: resume: skipping permanent breakpoint\n");
2409 gdbarch_skip_permanent_breakpoint (gdbarch
, regcache
);
2410 /* Update pc to reflect the new address from which we will
2411 execute instructions. */
2412 pc
= regcache_read_pc (regcache
);
2416 /* We've already advanced the PC, so the stepping part
2417 is done. Now we need to arrange for a trap to be
2418 reported to handle_inferior_event. Set a breakpoint
2419 at the current PC, and run to it. Don't update
2420 prev_pc, because if we end in
2421 switch_back_to_stepped_thread, we want the "expected
2422 thread advanced also" branch to be taken. IOW, we
2423 don't want this thread to step further from PC
2425 gdb_assert (!step_over_info_valid_p ());
2426 insert_single_step_breakpoint (gdbarch
, aspace
, pc
);
2427 insert_breakpoints ();
2429 resume_ptid
= internal_resume_ptid (user_step
);
2430 do_target_resume (resume_ptid
, 0, GDB_SIGNAL_0
);
2437 /* If we have a breakpoint to step over, make sure to do a single
2438 step only. Same if we have software watchpoints. */
2439 if (tp
->control
.trap_expected
|| bpstat_should_step ())
2440 tp
->control
.may_range_step
= 0;
2442 /* If displaced stepping is enabled, step over breakpoints by executing a
2443 copy of the instruction at a different address.
2445 We can't use displaced stepping when we have a signal to deliver;
2446 the comments for displaced_step_prepare explain why. The
2447 comments in the handle_inferior event for dealing with 'random
2448 signals' explain what we do instead.
2450 We can't use displaced stepping when we are waiting for vfork_done
2451 event, displaced stepping breaks the vfork child similarly as single
2452 step software breakpoint. */
2453 if (tp
->control
.trap_expected
2454 && use_displaced_stepping (tp
)
2455 && !step_over_info_valid_p ()
2456 && sig
== GDB_SIGNAL_0
2457 && !current_inferior ()->waiting_for_vfork_done
)
2459 int prepared
= displaced_step_prepare (tp
);
2464 fprintf_unfiltered (gdb_stdlog
,
2465 "Got placed in step-over queue\n");
2467 tp
->control
.trap_expected
= 0;
2470 else if (prepared
< 0)
2472 /* Fallback to stepping over the breakpoint in-line. */
2474 if (target_is_non_stop_p ())
2475 stop_all_threads ();
2477 set_step_over_info (regcache
->aspace (),
2478 regcache_read_pc (regcache
), 0, tp
->global_num
);
2480 step
= maybe_software_singlestep (gdbarch
, pc
);
2482 insert_breakpoints ();
2484 else if (prepared
> 0)
2486 struct displaced_step_inferior_state
*displaced
;
2488 /* Update pc to reflect the new address from which we will
2489 execute instructions due to displaced stepping. */
2490 pc
= regcache_read_pc (get_thread_regcache (tp
));
2492 displaced
= get_displaced_stepping_state (tp
->inf
);
2493 step
= gdbarch_displaced_step_hw_singlestep
2494 (gdbarch
, displaced
->step_closure
.get ());
2498 /* Do we need to do it the hard way, w/temp breakpoints? */
2500 step
= maybe_software_singlestep (gdbarch
, pc
);
2502 /* Currently, our software single-step implementation leads to different
2503 results than hardware single-stepping in one situation: when stepping
2504 into delivering a signal which has an associated signal handler,
2505 hardware single-step will stop at the first instruction of the handler,
2506 while software single-step will simply skip execution of the handler.
2508 For now, this difference in behavior is accepted since there is no
2509 easy way to actually implement single-stepping into a signal handler
2510 without kernel support.
2512 However, there is one scenario where this difference leads to follow-on
2513 problems: if we're stepping off a breakpoint by removing all breakpoints
2514 and then single-stepping. In this case, the software single-step
2515 behavior means that even if there is a *breakpoint* in the signal
2516 handler, GDB still would not stop.
2518 Fortunately, we can at least fix this particular issue. We detect
2519 here the case where we are about to deliver a signal while software
2520 single-stepping with breakpoints removed. In this situation, we
2521 revert the decisions to remove all breakpoints and insert single-
2522 step breakpoints, and instead we install a step-resume breakpoint
2523 at the current address, deliver the signal without stepping, and
2524 once we arrive back at the step-resume breakpoint, actually step
2525 over the breakpoint we originally wanted to step over. */
2526 if (thread_has_single_step_breakpoints_set (tp
)
2527 && sig
!= GDB_SIGNAL_0
2528 && step_over_info_valid_p ())
2530 /* If we have nested signals or a pending signal is delivered
2531 immediately after a handler returns, might already have
2532 a step-resume breakpoint set on the earlier handler. We cannot
2533 set another step-resume breakpoint; just continue on until the
2534 original breakpoint is hit. */
2535 if (tp
->control
.step_resume_breakpoint
== NULL
)
2537 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2538 tp
->step_after_step_resume_breakpoint
= 1;
2541 delete_single_step_breakpoints (tp
);
2543 clear_step_over_info ();
2544 tp
->control
.trap_expected
= 0;
2546 insert_breakpoints ();
2549 /* If STEP is set, it's a request to use hardware stepping
2550 facilities. But in that case, we should never
2551 use singlestep breakpoint. */
2552 gdb_assert (!(thread_has_single_step_breakpoints_set (tp
) && step
));
2554 /* Decide the set of threads to ask the target to resume. */
2555 if (tp
->control
.trap_expected
)
2557 /* We're allowing a thread to run past a breakpoint it has
2558 hit, either by single-stepping the thread with the breakpoint
2559 removed, or by displaced stepping, with the breakpoint inserted.
2560 In the former case, we need to single-step only this thread,
2561 and keep others stopped, as they can miss this breakpoint if
2562 allowed to run. That's not really a problem for displaced
2563 stepping, but, we still keep other threads stopped, in case
2564 another thread is also stopped for a breakpoint waiting for
2565 its turn in the displaced stepping queue. */
2566 resume_ptid
= inferior_ptid
;
2569 resume_ptid
= internal_resume_ptid (user_step
);
2571 if (execution_direction
!= EXEC_REVERSE
2572 && step
&& breakpoint_inserted_here_p (aspace
, pc
))
2574 /* There are two cases where we currently need to step a
2575 breakpoint instruction when we have a signal to deliver:
2577 - See handle_signal_stop where we handle random signals that
2578 could take out us out of the stepping range. Normally, in
2579 that case we end up continuing (instead of stepping) over the
2580 signal handler with a breakpoint at PC, but there are cases
2581 where we should _always_ single-step, even if we have a
2582 step-resume breakpoint, like when a software watchpoint is
2583 set. Assuming single-stepping and delivering a signal at the
2584 same time would takes us to the signal handler, then we could
2585 have removed the breakpoint at PC to step over it. However,
2586 some hardware step targets (like e.g., Mac OS) can't step
2587 into signal handlers, and for those, we need to leave the
2588 breakpoint at PC inserted, as otherwise if the handler
2589 recurses and executes PC again, it'll miss the breakpoint.
2590 So we leave the breakpoint inserted anyway, but we need to
2591 record that we tried to step a breakpoint instruction, so
2592 that adjust_pc_after_break doesn't end up confused.
2594 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2595 in one thread after another thread that was stepping had been
2596 momentarily paused for a step-over. When we re-resume the
2597 stepping thread, it may be resumed from that address with a
2598 breakpoint that hasn't trapped yet. Seen with
2599 gdb.threads/non-stop-fair-events.exp, on targets that don't
2600 do displaced stepping. */
2603 fprintf_unfiltered (gdb_stdlog
,
2604 "infrun: resume: [%s] stepped breakpoint\n",
2605 target_pid_to_str (tp
->ptid
).c_str ());
2607 tp
->stepped_breakpoint
= 1;
2609 /* Most targets can step a breakpoint instruction, thus
2610 executing it normally. But if this one cannot, just
2611 continue and we will hit it anyway. */
2612 if (gdbarch_cannot_step_breakpoint (gdbarch
))
2617 && tp
->control
.trap_expected
2618 && use_displaced_stepping (tp
)
2619 && !step_over_info_valid_p ())
2621 struct regcache
*resume_regcache
= get_thread_regcache (tp
);
2622 struct gdbarch
*resume_gdbarch
= resume_regcache
->arch ();
2623 CORE_ADDR actual_pc
= regcache_read_pc (resume_regcache
);
2626 fprintf_unfiltered (gdb_stdlog
, "displaced: run %s: ",
2627 paddress (resume_gdbarch
, actual_pc
));
2628 read_memory (actual_pc
, buf
, sizeof (buf
));
2629 displaced_step_dump_bytes (gdb_stdlog
, buf
, sizeof (buf
));
2632 if (tp
->control
.may_range_step
)
2634 /* If we're resuming a thread with the PC out of the step
2635 range, then we're doing some nested/finer run control
2636 operation, like stepping the thread out of the dynamic
2637 linker or the displaced stepping scratch pad. We
2638 shouldn't have allowed a range step then. */
2639 gdb_assert (pc_in_thread_step_range (pc
, tp
));
2642 do_target_resume (resume_ptid
, step
, sig
);
2646 /* Resume the inferior. SIG is the signal to give the inferior
2647 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2648 rolls back state on error. */
2651 resume (gdb_signal sig
)
2657 catch (const gdb_exception
&ex
)
2659 /* If resuming is being aborted for any reason, delete any
2660 single-step breakpoint resume_1 may have created, to avoid
2661 confusing the following resumption, and to avoid leaving
2662 single-step breakpoints perturbing other threads, in case
2663 we're running in non-stop mode. */
2664 if (inferior_ptid
!= null_ptid
)
2665 delete_single_step_breakpoints (inferior_thread ());
2675 /* Counter that tracks number of user visible stops. This can be used
2676 to tell whether a command has proceeded the inferior past the
2677 current location. This allows e.g., inferior function calls in
2678 breakpoint commands to not interrupt the command list. When the
2679 call finishes successfully, the inferior is standing at the same
2680 breakpoint as if nothing happened (and so we don't call
2682 static ULONGEST current_stop_id
;
2689 return current_stop_id
;
2692 /* Called when we report a user visible stop. */
2700 /* Clear out all variables saying what to do when inferior is continued.
2701 First do this, then set the ones you want, then call `proceed'. */
2704 clear_proceed_status_thread (struct thread_info
*tp
)
2707 fprintf_unfiltered (gdb_stdlog
,
2708 "infrun: clear_proceed_status_thread (%s)\n",
2709 target_pid_to_str (tp
->ptid
).c_str ());
2711 /* If we're starting a new sequence, then the previous finished
2712 single-step is no longer relevant. */
2713 if (tp
->suspend
.waitstatus_pending_p
)
2715 if (tp
->suspend
.stop_reason
== TARGET_STOPPED_BY_SINGLE_STEP
)
2718 fprintf_unfiltered (gdb_stdlog
,
2719 "infrun: clear_proceed_status: pending "
2720 "event of %s was a finished step. "
2722 target_pid_to_str (tp
->ptid
).c_str ());
2724 tp
->suspend
.waitstatus_pending_p
= 0;
2725 tp
->suspend
.stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
2727 else if (debug_infrun
)
2730 = target_waitstatus_to_string (&tp
->suspend
.waitstatus
);
2732 fprintf_unfiltered (gdb_stdlog
,
2733 "infrun: clear_proceed_status_thread: thread %s "
2734 "has pending wait status %s "
2735 "(currently_stepping=%d).\n",
2736 target_pid_to_str (tp
->ptid
).c_str (),
2738 currently_stepping (tp
));
2742 /* If this signal should not be seen by program, give it zero.
2743 Used for debugging signals. */
2744 if (!signal_pass_state (tp
->suspend
.stop_signal
))
2745 tp
->suspend
.stop_signal
= GDB_SIGNAL_0
;
2747 delete tp
->thread_fsm
;
2748 tp
->thread_fsm
= NULL
;
2750 tp
->control
.trap_expected
= 0;
2751 tp
->control
.step_range_start
= 0;
2752 tp
->control
.step_range_end
= 0;
2753 tp
->control
.may_range_step
= 0;
2754 tp
->control
.step_frame_id
= null_frame_id
;
2755 tp
->control
.step_stack_frame_id
= null_frame_id
;
2756 tp
->control
.step_over_calls
= STEP_OVER_UNDEBUGGABLE
;
2757 tp
->control
.step_start_function
= NULL
;
2758 tp
->stop_requested
= 0;
2760 tp
->control
.stop_step
= 0;
2762 tp
->control
.proceed_to_finish
= 0;
2764 tp
->control
.stepping_command
= 0;
2766 /* Discard any remaining commands or status from previous stop. */
2767 bpstat_clear (&tp
->control
.stop_bpstat
);
2771 clear_proceed_status (int step
)
2773 /* With scheduler-locking replay, stop replaying other threads if we're
2774 not replaying the user-visible resume ptid.
2776 This is a convenience feature to not require the user to explicitly
2777 stop replaying the other threads. We're assuming that the user's
2778 intent is to resume tracing the recorded process. */
2779 if (!non_stop
&& scheduler_mode
== schedlock_replay
2780 && target_record_is_replaying (minus_one_ptid
)
2781 && !target_record_will_replay (user_visible_resume_ptid (step
),
2782 execution_direction
))
2783 target_record_stop_replaying ();
2785 if (!non_stop
&& inferior_ptid
!= null_ptid
)
2787 ptid_t resume_ptid
= user_visible_resume_ptid (step
);
2788 process_stratum_target
*resume_target
2789 = user_visible_resume_target (resume_ptid
);
2791 /* In all-stop mode, delete the per-thread status of all threads
2792 we're about to resume, implicitly and explicitly. */
2793 for (thread_info
*tp
: all_non_exited_threads (resume_target
, resume_ptid
))
2794 clear_proceed_status_thread (tp
);
2797 if (inferior_ptid
!= null_ptid
)
2799 struct inferior
*inferior
;
2803 /* If in non-stop mode, only delete the per-thread status of
2804 the current thread. */
2805 clear_proceed_status_thread (inferior_thread ());
2808 inferior
= current_inferior ();
2809 inferior
->control
.stop_soon
= NO_STOP_QUIETLY
;
2812 gdb::observers::about_to_proceed
.notify ();
2815 /* Returns true if TP is still stopped at a breakpoint that needs
2816 stepping-over in order to make progress. If the breakpoint is gone
2817 meanwhile, we can skip the whole step-over dance. */
2820 thread_still_needs_step_over_bp (struct thread_info
*tp
)
2822 if (tp
->stepping_over_breakpoint
)
2824 struct regcache
*regcache
= get_thread_regcache (tp
);
2826 if (breakpoint_here_p (regcache
->aspace (),
2827 regcache_read_pc (regcache
))
2828 == ordinary_breakpoint_here
)
2831 tp
->stepping_over_breakpoint
= 0;
2837 /* Check whether thread TP still needs to start a step-over in order
2838 to make progress when resumed. Returns an bitwise or of enum
2839 step_over_what bits, indicating what needs to be stepped over. */
2841 static step_over_what
2842 thread_still_needs_step_over (struct thread_info
*tp
)
2844 step_over_what what
= 0;
2846 if (thread_still_needs_step_over_bp (tp
))
2847 what
|= STEP_OVER_BREAKPOINT
;
2849 if (tp
->stepping_over_watchpoint
2850 && !target_have_steppable_watchpoint
)
2851 what
|= STEP_OVER_WATCHPOINT
;
2856 /* Returns true if scheduler locking applies. STEP indicates whether
2857 we're about to do a step/next-like command to a thread. */
2860 schedlock_applies (struct thread_info
*tp
)
2862 return (scheduler_mode
== schedlock_on
2863 || (scheduler_mode
== schedlock_step
2864 && tp
->control
.stepping_command
)
2865 || (scheduler_mode
== schedlock_replay
2866 && target_record_will_replay (minus_one_ptid
,
2867 execution_direction
)));
2870 /* Calls target_commit_resume on all targets. */
2873 commit_resume_all_targets ()
2875 scoped_restore_current_thread restore_thread
;
2877 /* Map between process_target and a representative inferior. This
2878 is to avoid committing a resume in the same target more than
2879 once. Resumptions must be idempotent, so this is an
2881 std::unordered_map
<process_stratum_target
*, inferior
*> conn_inf
;
2883 for (inferior
*inf
: all_non_exited_inferiors ())
2884 if (inf
->has_execution ())
2885 conn_inf
[inf
->process_target ()] = inf
;
2887 for (const auto &ci
: conn_inf
)
2889 inferior
*inf
= ci
.second
;
2890 switch_to_inferior_no_thread (inf
);
2891 target_commit_resume ();
2895 /* Check that all the targets we're about to resume are in non-stop
2896 mode. Ideally, we'd only care whether all targets support
2897 target-async, but we're not there yet. E.g., stop_all_threads
2898 doesn't know how to handle all-stop targets. Also, the remote
2899 protocol in all-stop mode is synchronous, irrespective of
2900 target-async, which means that things like a breakpoint re-set
2901 triggered by one target would try to read memory from all targets
2905 check_multi_target_resumption (process_stratum_target
*resume_target
)
2907 if (!non_stop
&& resume_target
== nullptr)
2909 scoped_restore_current_thread restore_thread
;
2911 /* This is used to track whether we're resuming more than one
2913 process_stratum_target
*first_connection
= nullptr;
2915 /* The first inferior we see with a target that does not work in
2916 always-non-stop mode. */
2917 inferior
*first_not_non_stop
= nullptr;
2919 for (inferior
*inf
: all_non_exited_inferiors (resume_target
))
2921 switch_to_inferior_no_thread (inf
);
2923 if (!target_has_execution
)
2926 process_stratum_target
*proc_target
2927 = current_inferior ()->process_target();
2929 if (!target_is_non_stop_p ())
2930 first_not_non_stop
= inf
;
2932 if (first_connection
== nullptr)
2933 first_connection
= proc_target
;
2934 else if (first_connection
!= proc_target
2935 && first_not_non_stop
!= nullptr)
2937 switch_to_inferior_no_thread (first_not_non_stop
);
2939 proc_target
= current_inferior ()->process_target();
2941 error (_("Connection %d (%s) does not support "
2942 "multi-target resumption."),
2943 proc_target
->connection_number
,
2944 make_target_connection_string (proc_target
).c_str ());
2950 /* Basic routine for continuing the program in various fashions.
2952 ADDR is the address to resume at, or -1 for resume where stopped.
2953 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
2954 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
2956 You should call clear_proceed_status before calling proceed. */
2959 proceed (CORE_ADDR addr
, enum gdb_signal siggnal
)
2961 struct regcache
*regcache
;
2962 struct gdbarch
*gdbarch
;
2964 struct execution_control_state ecss
;
2965 struct execution_control_state
*ecs
= &ecss
;
2968 /* If we're stopped at a fork/vfork, follow the branch set by the
2969 "set follow-fork-mode" command; otherwise, we'll just proceed
2970 resuming the current thread. */
2971 if (!follow_fork ())
2973 /* The target for some reason decided not to resume. */
2975 if (target_can_async_p ())
2976 inferior_event_handler (INF_EXEC_COMPLETE
);
2980 /* We'll update this if & when we switch to a new thread. */
2981 previous_inferior_ptid
= inferior_ptid
;
2983 regcache
= get_current_regcache ();
2984 gdbarch
= regcache
->arch ();
2985 const address_space
*aspace
= regcache
->aspace ();
2987 pc
= regcache_read_pc_protected (regcache
);
2989 thread_info
*cur_thr
= inferior_thread ();
2991 /* Fill in with reasonable starting values. */
2992 init_thread_stepping_state (cur_thr
);
2994 gdb_assert (!thread_is_in_step_over_chain (cur_thr
));
2997 = user_visible_resume_ptid (cur_thr
->control
.stepping_command
);
2998 process_stratum_target
*resume_target
2999 = user_visible_resume_target (resume_ptid
);
3001 check_multi_target_resumption (resume_target
);
3003 if (addr
== (CORE_ADDR
) -1)
3005 if (pc
== cur_thr
->suspend
.stop_pc
3006 && breakpoint_here_p (aspace
, pc
) == ordinary_breakpoint_here
3007 && execution_direction
!= EXEC_REVERSE
)
3008 /* There is a breakpoint at the address we will resume at,
3009 step one instruction before inserting breakpoints so that
3010 we do not stop right away (and report a second hit at this
3013 Note, we don't do this in reverse, because we won't
3014 actually be executing the breakpoint insn anyway.
3015 We'll be (un-)executing the previous instruction. */
3016 cur_thr
->stepping_over_breakpoint
= 1;
3017 else if (gdbarch_single_step_through_delay_p (gdbarch
)
3018 && gdbarch_single_step_through_delay (gdbarch
,
3019 get_current_frame ()))
3020 /* We stepped onto an instruction that needs to be stepped
3021 again before re-inserting the breakpoint, do so. */
3022 cur_thr
->stepping_over_breakpoint
= 1;
3026 regcache_write_pc (regcache
, addr
);
3029 if (siggnal
!= GDB_SIGNAL_DEFAULT
)
3030 cur_thr
->suspend
.stop_signal
= siggnal
;
3032 /* If an exception is thrown from this point on, make sure to
3033 propagate GDB's knowledge of the executing state to the
3034 frontend/user running state. */
3035 scoped_finish_thread_state
finish_state (resume_target
, resume_ptid
);
3037 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3038 threads (e.g., we might need to set threads stepping over
3039 breakpoints first), from the user/frontend's point of view, all
3040 threads in RESUME_PTID are now running. Unless we're calling an
3041 inferior function, as in that case we pretend the inferior
3042 doesn't run at all. */
3043 if (!cur_thr
->control
.in_infcall
)
3044 set_running (resume_target
, resume_ptid
, true);
3047 fprintf_unfiltered (gdb_stdlog
,
3048 "infrun: proceed (addr=%s, signal=%s)\n",
3049 paddress (gdbarch
, addr
),
3050 gdb_signal_to_symbol_string (siggnal
));
3052 annotate_starting ();
3054 /* Make sure that output from GDB appears before output from the
3056 gdb_flush (gdb_stdout
);
3058 /* Since we've marked the inferior running, give it the terminal. A
3059 QUIT/Ctrl-C from here on is forwarded to the target (which can
3060 still detect attempts to unblock a stuck connection with repeated
3061 Ctrl-C from within target_pass_ctrlc). */
3062 target_terminal::inferior ();
3064 /* In a multi-threaded task we may select another thread and
3065 then continue or step.
3067 But if a thread that we're resuming had stopped at a breakpoint,
3068 it will immediately cause another breakpoint stop without any
3069 execution (i.e. it will report a breakpoint hit incorrectly). So
3070 we must step over it first.
3072 Look for threads other than the current (TP) that reported a
3073 breakpoint hit and haven't been resumed yet since. */
3075 /* If scheduler locking applies, we can avoid iterating over all
3077 if (!non_stop
&& !schedlock_applies (cur_thr
))
3079 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
3082 switch_to_thread_no_regs (tp
);
3084 /* Ignore the current thread here. It's handled
3089 if (!thread_still_needs_step_over (tp
))
3092 gdb_assert (!thread_is_in_step_over_chain (tp
));
3095 fprintf_unfiltered (gdb_stdlog
,
3096 "infrun: need to step-over [%s] first\n",
3097 target_pid_to_str (tp
->ptid
).c_str ());
3099 thread_step_over_chain_enqueue (tp
);
3102 switch_to_thread (cur_thr
);
3105 /* Enqueue the current thread last, so that we move all other
3106 threads over their breakpoints first. */
3107 if (cur_thr
->stepping_over_breakpoint
)
3108 thread_step_over_chain_enqueue (cur_thr
);
3110 /* If the thread isn't started, we'll still need to set its prev_pc,
3111 so that switch_back_to_stepped_thread knows the thread hasn't
3112 advanced. Must do this before resuming any thread, as in
3113 all-stop/remote, once we resume we can't send any other packet
3114 until the target stops again. */
3115 cur_thr
->prev_pc
= regcache_read_pc_protected (regcache
);
3118 scoped_restore save_defer_tc
= make_scoped_defer_target_commit_resume ();
3120 started
= start_step_over ();
3122 if (step_over_info_valid_p ())
3124 /* Either this thread started a new in-line step over, or some
3125 other thread was already doing one. In either case, don't
3126 resume anything else until the step-over is finished. */
3128 else if (started
&& !target_is_non_stop_p ())
3130 /* A new displaced stepping sequence was started. In all-stop,
3131 we can't talk to the target anymore until it next stops. */
3133 else if (!non_stop
&& target_is_non_stop_p ())
3135 /* In all-stop, but the target is always in non-stop mode.
3136 Start all other threads that are implicitly resumed too. */
3137 for (thread_info
*tp
: all_non_exited_threads (resume_target
,
3140 switch_to_thread_no_regs (tp
);
3142 if (!tp
->inf
->has_execution ())
3145 fprintf_unfiltered (gdb_stdlog
,
3146 "infrun: proceed: [%s] target has "
3148 target_pid_to_str (tp
->ptid
).c_str ());
3155 fprintf_unfiltered (gdb_stdlog
,
3156 "infrun: proceed: [%s] resumed\n",
3157 target_pid_to_str (tp
->ptid
).c_str ());
3158 gdb_assert (tp
->executing
|| tp
->suspend
.waitstatus_pending_p
);
3162 if (thread_is_in_step_over_chain (tp
))
3165 fprintf_unfiltered (gdb_stdlog
,
3166 "infrun: proceed: [%s] needs step-over\n",
3167 target_pid_to_str (tp
->ptid
).c_str ());
3172 fprintf_unfiltered (gdb_stdlog
,
3173 "infrun: proceed: resuming %s\n",
3174 target_pid_to_str (tp
->ptid
).c_str ());
3176 reset_ecs (ecs
, tp
);
3177 switch_to_thread (tp
);
3178 keep_going_pass_signal (ecs
);
3179 if (!ecs
->wait_some_more
)
3180 error (_("Command aborted."));
3183 else if (!cur_thr
->resumed
&& !thread_is_in_step_over_chain (cur_thr
))
3185 /* The thread wasn't started, and isn't queued, run it now. */
3186 reset_ecs (ecs
, cur_thr
);
3187 switch_to_thread (cur_thr
);
3188 keep_going_pass_signal (ecs
);
3189 if (!ecs
->wait_some_more
)
3190 error (_("Command aborted."));
3194 commit_resume_all_targets ();
3196 finish_state
.release ();
3198 /* If we've switched threads above, switch back to the previously
3199 current thread. We don't want the user to see a different
3201 switch_to_thread (cur_thr
);
3203 /* Tell the event loop to wait for it to stop. If the target
3204 supports asynchronous execution, it'll do this from within
3206 if (!target_can_async_p ())
3207 mark_async_event_handler (infrun_async_inferior_event_token
);
3211 /* Start remote-debugging of a machine over a serial link. */
/* NOTE(review): extraction-damaged text.  The return-type line, the
   braces, and the statements after post_create_inferior (presumably a
   final normal_stop call) are missing here -- confirm against the
   upstream file before editing.  Sets up quiet-stop handling for the
   initial remote connection, then waits for the first stop.  */
3214 start_remote (int from_tty
)
3216 inferior
*inf
= current_inferior ();
/* Suppress the usual "program stopped" announcements while the
   initial remote connection settles.  */
3217 inf
->control
.stop_soon
= STOP_QUIETLY_REMOTE
;
3219 /* Always go on waiting for the target, regardless of the mode. */
3220 /* FIXME: cagney/1999-09-23: At present it isn't possible to
3221 indicate to wait_for_inferior that a target should timeout if
3222 nothing is returned (instead of just blocking). Because of this,
3223 targets expecting an immediate response need to, internally, set
3224 things up so that the target_wait() is forced to eventually
3226 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3227 differentiate to its caller what the state of the target is after
3228 the initial open has been performed. Here we're assuming that
3229 the target has stopped. It should be possible to eventually have
3230 target_open() return to the caller an indication that the target
3231 is currently running and GDB state should be set to the same as
3232 for an async run. */
3233 wait_for_inferior (inf
);
3235 /* Now that the inferior has stopped, do any bookkeeping like
3236 loading shared libraries. We want to do this before normal_stop,
3237 so that the displayed frame is up to date. */
3238 post_create_inferior (current_top_target (), from_tty
);
3243 /* Initialize static vars when a new inferior begins. */
3246 init_wait_for_inferior (void)
3248 /* These are meaningless until the first time through wait_for_inferior. */
3250 breakpoint_init_inferior (inf_starting
);
3252 clear_proceed_status (0);
3254 nullify_last_target_wait_ptid ();
3256 previous_inferior_ptid
= inferior_ptid
;
3261 static void handle_inferior_event (struct execution_control_state
*ecs
);
3263 static void handle_step_into_function (struct gdbarch
*gdbarch
,
3264 struct execution_control_state
*ecs
);
3265 static void handle_step_into_function_backward (struct gdbarch
*gdbarch
,
3266 struct execution_control_state
*ecs
);
3267 static void handle_signal_stop (struct execution_control_state
*ecs
);
3268 static void check_exception_resume (struct execution_control_state
*,
3269 struct frame_info
*);
3271 static void end_stepping_range (struct execution_control_state
*ecs
);
3272 static void stop_waiting (struct execution_control_state
*ecs
);
3273 static void keep_going (struct execution_control_state
*ecs
);
3274 static void process_event_stop_test (struct execution_control_state
*ecs
);
3275 static int switch_back_to_stepped_thread (struct execution_control_state
*ecs
);
3277 /* This function is attached as a "thread_stop_requested" observer.
3278 Cleanup local state that assumed the PTID was to be resumed, and
3279 report the stop to the frontend. */
/* NOTE(review): extraction-damaged text -- braces, blank lines and
   some statements of this function are missing; confirm against the
   upstream file.  */
3282 infrun_thread_stop_requested (ptid_t ptid
)
3284 process_stratum_target
*curr_target
= current_inferior ()->process_target ();
3286 /* PTID was requested to stop. If the thread was already stopped,
3287 but the user/frontend doesn't know about that yet (e.g., the
3288 thread had been temporarily paused for some step-over), set up
3289 for reporting the stop now. */
3290 for (thread_info
*tp
: all_threads (curr_target
, ptid
))
/* Threads still running on the target will report their stop on
   their own; only already-stopped threads need the treatment
   below.  */
3292 if (tp
->state
!= THREAD_RUNNING
)
3297 /* Remove matching threads from the step-over queue, so
3298 start_step_over doesn't try to resume them
3300 if (thread_is_in_step_over_chain (tp
))
3301 thread_step_over_chain_remove (tp
);
3303 /* If the thread is stopped, but the user/frontend doesn't
3304 know about that yet, queue a pending event, as if the
3305 thread had just stopped now. Unless the thread already had
3307 if (!tp
->suspend
.waitstatus_pending_p
)
/* Synthesize a GDB_SIGNAL_0 "stopped" event for this thread.  */
3309 tp
->suspend
.waitstatus_pending_p
= 1;
3310 tp
->suspend
.waitstatus
.kind
= TARGET_WAITKIND_STOPPED
;
3311 tp
->suspend
.waitstatus
.value
.sig
= GDB_SIGNAL_0
;
3314 /* Clear the inline-frame state, since we're re-processing the
3316 clear_inline_frame_state (tp
);
3318 /* If this thread was paused because some other thread was
3319 doing an inline-step over, let that finish first. Once
3320 that happens, we'll restart all threads and consume pending
3321 stop events then. */
3322 if (step_over_info_valid_p ())
3325 /* Otherwise we can process the (new) pending event now. Set
3326 it so this pending event is considered by
3333 infrun_thread_thread_exit (struct thread_info
*tp
, int silent
)
3335 if (target_last_proc_target
== tp
->inf
->process_target ()
3336 && target_last_wait_ptid
== tp
->ptid
)
3337 nullify_last_target_wait_ptid ();
3340 /* Delete the step resume, single-step and longjmp/exception resume
3341 breakpoints of TP. */
3344 delete_thread_infrun_breakpoints (struct thread_info
*tp
)
3346 delete_step_resume_breakpoint (tp
);
3347 delete_exception_resume_breakpoint (tp
);
3348 delete_single_step_breakpoints (tp
);
3351 /* If the target still has execution, call FUNC for each thread that
3352 just stopped. In all-stop, that's all the non-exited threads; in
3353 non-stop, that's the current thread, only. */
3355 typedef void (*for_each_just_stopped_thread_callback_func
)
3356 (struct thread_info
*tp
);
/* NOTE(review): extraction-damaged text.  The early-return statement
   following the guard below, and the body of the all-stop loop
   (presumably "func (tp);"), are missing -- confirm against the
   upstream file.  */
3359 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func
)
3361 if (!target_has_execution
|| inferior_ptid
== null_ptid
)
3364 if (target_is_non_stop_p ())
3366 /* If in non-stop mode, only the current thread stopped. */
3367 func (inferior_thread ());
3371 /* In all-stop mode, all threads have stopped. */
3372 for (thread_info
*tp
: all_non_exited_threads ())
3377 /* Delete the step resume and longjmp/exception resume breakpoints of
3378 the threads that just stopped. */
3381 delete_just_stopped_threads_infrun_breakpoints (void)
3383 for_each_just_stopped_thread (delete_thread_infrun_breakpoints
);
3386 /* Delete the single-step breakpoints of the threads that just
3390 delete_just_stopped_threads_single_step_breakpoints (void)
3392 for_each_just_stopped_thread (delete_single_step_breakpoints
);
/* Debug helper: dump a target_wait request/result pair to gdb_stdlog.
   NOTE(review): extraction-damaged text -- the string_file "stb"
   declaration and the pid()/lwp() argument lines of the printf calls
   are missing; confirm against the upstream file.  */
3398 print_target_wait_results (ptid_t waiton_ptid
, ptid_t result_ptid
,
3399 const struct target_waitstatus
*ws
)
3401 std::string status_string
= target_waitstatus_to_string (ws
);
3404 /* The text is split over several lines because it was getting too long.
3405 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3406 output as a unit; we want only one timestamp printed if debug_timestamp
3409 stb
.printf ("infrun: target_wait (%d.%ld.%ld",
3412 waiton_ptid
.tid ());
3413 if (waiton_ptid
.pid () != -1)
3414 stb
.printf (" [%s]", target_pid_to_str (waiton_ptid
).c_str ());
3415 stb
.printf (", status) =\n");
3416 stb
.printf ("infrun: %d.%ld.%ld [%s],\n",
3420 target_pid_to_str (result_ptid
).c_str ());
3421 stb
.printf ("infrun: %s\n", status_string
.c_str ());
3423 /* This uses %s in part to handle %'s in the text, but also to avoid
3424 a gcc error: the format attribute requires a string literal. */
3425 fprintf_unfiltered (gdb_stdlog
, "%s", stb
.c_str ());
3428 /* Select a thread at random, out of those which are resumed and have
/* NOTE(review): extraction-damaged text -- the num_events counter
   declaration, loop bodies, and the "return tp;"/"return NULL;"
   statements are missing; confirm against the upstream file.  Two
   passes: first count candidate threads, then pick the Nth one.  */
3431 static struct thread_info
*
3432 random_pending_event_thread (inferior
*inf
, ptid_t waiton_ptid
)
3436 auto has_event
= [&] (thread_info
*tp
)
3438 return (tp
->ptid
.matches (waiton_ptid
)
3440 && tp
->suspend
.waitstatus_pending_p
);
3443 /* First see how many events we have. Count only resumed threads
3444 that have an event pending. */
3445 for (thread_info
*tp
: inf
->non_exited_threads ())
3449 if (num_events
== 0)
3452 /* Now randomly pick a thread out of those that have had events. */
3453 int random_selector
= (int) ((num_events
* (double) rand ())
3454 / (RAND_MAX
+ 1.0));
3456 if (debug_infrun
&& num_events
> 1)
3457 fprintf_unfiltered (gdb_stdlog
,
3458 "infrun: Found %d events, selecting #%d\n",
3459 num_events
, random_selector
);
3461 /* Select the Nth thread that has had an event. */
3462 for (thread_info
*tp
: inf
->non_exited_threads ())
3464 if (random_selector
-- == 0)
3467 gdb_assert_not_reached ("event thread not found");
3470 /* Wrapper for target_wait that first checks whether threads have
3471 pending statuses to report before actually asking the target for
3472 more events. INF is the inferior we're using to call target_wait
/* NOTE(review): extraction-damaged text -- braces, several
   declarations (e.g. CORE_ADDR pc, ptid_t event_ptid) and some
   statements are missing throughout this function; confirm against
   the upstream file before editing.  */
3476 do_target_wait_1 (inferior
*inf
, ptid_t ptid
,
3477 target_waitstatus
*status
, int options
)
3480 struct thread_info
*tp
;
3482 /* We know that we are looking for an event in the target of inferior
3483 INF, but we don't know which thread the event might come from. As
3484 such we want to make sure that INFERIOR_PTID is reset so that none of
3485 the wait code relies on it - doing so is always a mistake. */
3486 switch_to_inferior_no_thread (inf
);
3488 /* First check if there is a resumed thread with a wait status
3490 if (ptid
== minus_one_ptid
|| ptid
.is_pid ())
3492 tp
= random_pending_event_thread (inf
, ptid
);
3497 fprintf_unfiltered (gdb_stdlog
,
3498 "infrun: Waiting for specific thread %s.\n",
3499 target_pid_to_str (ptid
).c_str ());
3501 /* We have a specific thread to check. */
3502 tp
= find_thread_ptid (inf
, ptid
);
3503 gdb_assert (tp
!= NULL
);
3504 if (!tp
->suspend
.waitstatus_pending_p
)
/* Re-validate a stale pending breakpoint-hit event: if the PC moved
   or the breakpoint is gone, the event is discarded below.  */
3509 && (tp
->suspend
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3510 || tp
->suspend
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
))
3512 struct regcache
*regcache
= get_thread_regcache (tp
);
3513 struct gdbarch
*gdbarch
= regcache
->arch ();
3517 pc
= regcache_read_pc (regcache
);
3519 if (pc
!= tp
->suspend
.stop_pc
)
3522 fprintf_unfiltered (gdb_stdlog
,
3523 "infrun: PC of %s changed. was=%s, now=%s\n",
3524 target_pid_to_str (tp
->ptid
).c_str (),
3525 paddress (gdbarch
, tp
->suspend
.stop_pc
),
3526 paddress (gdbarch
, pc
));
3529 else if (!breakpoint_inserted_here_p (regcache
->aspace (), pc
))
3532 fprintf_unfiltered (gdb_stdlog
,
3533 "infrun: previous breakpoint of %s, at %s gone\n",
3534 target_pid_to_str (tp
->ptid
).c_str (),
3535 paddress (gdbarch
, pc
));
3543 fprintf_unfiltered (gdb_stdlog
,
3544 "infrun: pending event of %s cancelled.\n",
3545 target_pid_to_str (tp
->ptid
).c_str ());
/* Downgrade the cancelled event to a spurious stop.  */
3547 tp
->suspend
.waitstatus
.kind
= TARGET_WAITKIND_SPURIOUS
;
3548 tp
->suspend
.stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
3557 = target_waitstatus_to_string (&tp
->suspend
.waitstatus
);
3559 fprintf_unfiltered (gdb_stdlog
,
3560 "infrun: Using pending wait status %s for %s.\n",
3562 target_pid_to_str (tp
->ptid
).c_str ());
3565 /* Now that we've selected our final event LWP, un-adjust its PC
3566 if it was a software breakpoint (and the target doesn't
3567 always adjust the PC itself). */
3568 if (tp
->suspend
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
3569 && !target_supports_stopped_by_sw_breakpoint ())
3571 struct regcache
*regcache
;
3572 struct gdbarch
*gdbarch
;
3575 regcache
= get_thread_regcache (tp
);
3576 gdbarch
= regcache
->arch ();
3578 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
3583 pc
= regcache_read_pc (regcache
);
3584 regcache_write_pc (regcache
, pc
+ decr_pc
);
/* Consume the pending event: hand it to the caller and clear the
   per-thread pending flag.  */
3588 tp
->suspend
.stop_reason
= TARGET_STOPPED_BY_NO_REASON
;
3589 *status
= tp
->suspend
.waitstatus
;
3590 tp
->suspend
.waitstatus_pending_p
= 0;
3592 /* Wake up the event loop again, until all pending events are
3594 if (target_is_async_p ())
3595 mark_async_event_handler (infrun_async_inferior_event_token
);
3599 /* But if we don't find one, we'll have to wait. */
3601 if (deprecated_target_wait_hook
)
3602 event_ptid
= deprecated_target_wait_hook (ptid
, status
, options
);
3604 event_ptid
= target_wait (ptid
, status
, options
);
3609 /* Wrapper for target_wait that first checks whether threads have
3610 pending statuses to report before actually asking the target for
3611 more events. Polls for events from all inferiors/targets. */
/* NOTE(review): extraction-damaged text -- braces, counter increments,
   the lambda bodies' braces, the "return do_wait (inf);" style
   statements and the second loop's increment expression are missing;
   confirm against the upstream file before editing.  */
3614 do_target_wait (ptid_t wait_ptid
, execution_control_state
*ecs
, int options
)
3616 int num_inferiors
= 0;
3617 int random_selector
;
3619 /* For fairness, we pick the first inferior/target to poll at random
3620 out of all inferiors that may report events, and then continue
3621 polling the rest of the inferior list starting from that one in a
3622 circular fashion until the whole list is polled once. */
3624 auto inferior_matches
= [&wait_ptid
] (inferior
*inf
)
3626 return (inf
->process_target () != NULL
3627 && ptid_t (inf
->pid
).matches (wait_ptid
));
3630 /* First see how many matching inferiors we have. */
3631 for (inferior
*inf
: all_inferiors ())
3632 if (inferior_matches (inf
))
/* No inferior can report events: tell the caller to ignore.  */
3635 if (num_inferiors
== 0)
3637 ecs
->ws
.kind
= TARGET_WAITKIND_IGNORE
;
3641 /* Now randomly pick an inferior out of those that matched. */
3642 random_selector
= (int)
3643 ((num_inferiors
* (double) rand ()) / (RAND_MAX
+ 1.0));
3645 if (debug_infrun
&& num_inferiors
> 1)
3646 fprintf_unfiltered (gdb_stdlog
,
3647 "infrun: Found %d inferiors, starting at #%d\n",
3648 num_inferiors
, random_selector
);
3650 /* Select the Nth inferior that matched. */
3652 inferior
*selected
= nullptr;
3654 for (inferior
*inf
: all_inferiors ())
3655 if (inferior_matches (inf
))
3656 if (random_selector
-- == 0)
3662 /* Now poll for events out of each of the matching inferior's
3663 targets, starting from the selected one. */
3665 auto do_wait
= [&] (inferior
*inf
)
3667 ecs
->ptid
= do_target_wait_1 (inf
, wait_ptid
, &ecs
->ws
, options
);
3668 ecs
->target
= inf
->process_target ();
3669 return (ecs
->ws
.kind
!= TARGET_WAITKIND_IGNORE
);
3672 /* Needed in 'all-stop + target-non-stop' mode, because we end up
3673 here spuriously after the target is all stopped and we've already
3674 reported the stop to the user, polling for events. */
3675 scoped_restore_current_thread restore_thread
;
3677 int inf_num
= selected
->num
;
3678 for (inferior
*inf
= selected
; inf
!= NULL
; inf
= inf
->next
)
3679 if (inferior_matches (inf
))
/* Wrap around: poll the inferiors before the randomly selected
   starting point.  */
3683 for (inferior
*inf
= inferior_list
;
3684 inf
!= NULL
&& inf
->num
< inf_num
;
3686 if (inferior_matches (inf
))
3690 ecs
->ws
.kind
= TARGET_WAITKIND_IGNORE
;
3694 /* Prepare and stabilize the inferior for detaching it. E.g.,
3695 detaching while a thread is displaced stepping is a recipe for
3696 crashing it, as nothing would readjust the PC out of the scratch
/* NOTE(review): extraction-damaged text -- braces and several lines
   are missing here.  In particular the initializer tying "ecs" to
   "ecss" (presumably "ecs = &ecss;", cf. wait_for_inferior below)
   was dropped: as shown, "ecs" would be used uninitialized by the
   memset.  Confirm against the upstream file before editing.  */
3700 prepare_for_detach (void)
3702 struct inferior
*inf
= current_inferior ();
3703 ptid_t pid_ptid
= ptid_t (inf
->pid
);
3705 displaced_step_inferior_state
*displaced
= get_displaced_stepping_state (inf
);
3707 /* Is any thread of this process displaced stepping? If not,
3708 there's nothing else to do. */
3709 if (displaced
->step_thread
== nullptr)
3713 fprintf_unfiltered (gdb_stdlog
,
3714 "displaced-stepping in-process while detaching");
3716 scoped_restore restore_detaching
= make_scoped_restore (&inf
->detaching
, true);
/* Pump events until the in-flight displaced step finishes.  */
3718 while (displaced
->step_thread
!= nullptr)
3720 struct execution_control_state ecss
;
3721 struct execution_control_state
*ecs
;
3724 memset (ecs
, 0, sizeof (*ecs
));
3726 overlay_cache_invalid
= 1;
3727 /* Flush target cache before starting to handle each event.
3728 Target was running and cache could be stale. This is just a
3729 heuristic. Running threads may modify target memory, but we
3730 don't get any event. */
3731 target_dcache_invalidate ();
3733 do_target_wait (pid_ptid
, ecs
, 0);
3736 print_target_wait_results (pid_ptid
, ecs
->ptid
, &ecs
->ws
);
3738 /* If an error happens while handling the event, propagate GDB's
3739 knowledge of the executing state to the frontend/user running
3741 scoped_finish_thread_state
finish_state (inf
->process_target (),
3744 /* Now figure out what to do with the result of the result. */
3745 handle_inferior_event (ecs
);
3747 /* No error, don't finish the state yet. */
3748 finish_state
.release ();
3750 /* Breakpoints and watchpoints are not installed on the target
3751 at this point, and signals are passed directly to the
3752 inferior, so this must mean the process is gone. */
3753 if (!ecs
->wait_some_more
)
3755 restore_detaching
.release ();
3756 error (_("Program exited while detaching"));
3760 restore_detaching
.release ();
3763 /* Wait for control to return from inferior to debugger.
3765 If inferior gets a signal, we may decide to start it up again
3766 instead of returning. That is why there is a loop in this function.
3767 When this function actually returns it means the inferior
3768 should be left stopped and GDB should read more commands. */
/* NOTE(review): extraction-damaged text -- the function's return-type
   line, braces, the "while (1)" loop header and the loop-exit break
   are missing; confirm against the upstream file before editing.  */
3771 wait_for_inferior (inferior
*inf
)
3775 (gdb_stdlog
, "infrun: wait_for_inferior ()\n");
3777 SCOPE_EXIT
{ delete_just_stopped_threads_infrun_breakpoints (); };
3779 /* If an error happens while handling the event, propagate GDB's
3780 knowledge of the executing state to the frontend/user running
3782 scoped_finish_thread_state finish_state
3783 (inf
->process_target (), minus_one_ptid
);
3787 struct execution_control_state ecss
;
3788 struct execution_control_state
*ecs
= &ecss
;
3790 memset (ecs
, 0, sizeof (*ecs
));
3792 overlay_cache_invalid
= 1;
3794 /* Flush target cache before starting to handle each event.
3795 Target was running and cache could be stale. This is just a
3796 heuristic. Running threads may modify target memory, but we
3797 don't get any event. */
3798 target_dcache_invalidate ();
3800 ecs
->ptid
= do_target_wait_1 (inf
, minus_one_ptid
, &ecs
->ws
, 0);
3801 ecs
->target
= inf
->process_target ();
3804 print_target_wait_results (minus_one_ptid
, ecs
->ptid
, &ecs
->ws
);
3806 /* Now figure out what to do with the result of the result. */
3807 handle_inferior_event (ecs
);
3809 if (!ecs
->wait_some_more
)
3813 /* No error, don't finish the state yet. */
3814 finish_state
.release ();
3817 /* Cleanup that reinstalls the readline callback handler, if the
3818 target is running in the background. If while handling the target
3819 event something triggered a secondary prompt, like e.g., a
3820 pagination prompt, we'll have removed the callback handler (see
3821 gdb_readline_wrapper_line). Need to do this as we go back to the
3822 event loop, ready to process further input. Note this has no
3823 effect if the handler hasn't actually been removed, because calling
3824 rl_callback_handler_install resets the line buffer, thus losing
/* NOTE(review): extraction-damaged text -- the guard that the comment
   below belongs to (presumably an early return when the UI is not
   async) is missing; confirm against the upstream file.  */
3828 reinstall_readline_callback_handler_cleanup ()
3830 struct ui
*ui
= current_ui
;
3834 /* We're not going back to the top level event loop yet. Don't
3835 install the readline callback, as it'd prep the terminal,
3836 readline-style (raw, noecho) (e.g., --batch). We'll install
3837 it the next time the prompt is displayed, when we're ready
3842 if (ui
->command_editing
&& ui
->prompt_state
!= PROMPT_BLOCKED
)
3843 gdb_rl_callback_handler_reinstall ();
3846 /* Clean up the FSMs of threads that are now stopped. In non-stop,
3847 that's just the event thread. In all-stop, that's all threads. */
/* NOTE(review): extraction-damaged text -- braces, the non-stop/
   all-stop branch structure and the "continue;" statements skipping
   FSM-less threads and the event thread are missing; confirm against
   the upstream file.  */
3850 clean_up_just_stopped_threads_fsms (struct execution_control_state
*ecs
)
3852 if (ecs
->event_thread
!= NULL
3853 && ecs
->event_thread
->thread_fsm
!= NULL
)
3854 ecs
->event_thread
->thread_fsm
->clean_up (ecs
->event_thread
);
3858 for (thread_info
*thr
: all_non_exited_threads ())
3860 if (thr
->thread_fsm
== NULL
)
3862 if (thr
== ecs
->event_thread
)
/* clean_up may do target calls; make THR the current thread
   first.  */
3865 switch_to_thread (thr
);
3866 thr
->thread_fsm
->clean_up (thr
);
3869 if (ecs
->event_thread
!= NULL
)
3870 switch_to_thread (ecs
->event_thread
);
3874 /* Helper for all_uis_check_sync_execution_done that works on the
/* If the current UI was waiting for a synchronous execution command
   to finish and that command is done, give the terminal back to GDB,
   notify observers, and re-enable input on this UI.
   NOTE(review): extraction-damaged text -- one condition line of the
   "if" below (original line 3883) is missing; confirm against the
   upstream file.  */
3878 check_curr_ui_sync_execution_done (void)
3880 struct ui
*ui
= current_ui
;
3882 if (ui
->prompt_state
== PROMPT_NEEDED
3884 && !gdb_in_secondary_prompt_p (ui
))
3886 target_terminal::ours ();
3887 gdb::observers::sync_execution_done
.notify ();
3888 ui_register_input_event_handler (ui
);
3895 all_uis_check_sync_execution_done (void)
3897 SWITCH_THRU_ALL_UIS ()
3899 check_curr_ui_sync_execution_done ();
3906 all_uis_on_sync_execution_starting (void)
3908 SWITCH_THRU_ALL_UIS ()
3910 if (current_ui
->prompt_state
== PROMPT_NEEDED
)
3911 async_disable_stdin ();
3915 /* Asynchronous version of wait_for_inferior. It is called by the
3916 event loop whenever a change of state is detected on the file
3917 descriptor corresponding to the target. It can be called more than
3918 once to complete a single execution command. In such cases we need
3919 to keep the state in a global variable ECSS. If it is the last time
3920 that this function is called for a single execution command, then
3921 report to the user that the inferior has stopped, and do the
3922 necessary cleanups. */
/* NOTE(review): extraction-damaged text -- braces, several guard
   conditions and statements are missing throughout this function;
   confirm against the upstream file before editing.  */
3925 fetch_inferior_event ()
3927 struct execution_control_state ecss
;
3928 struct execution_control_state
*ecs
= &ecss
;
3931 memset (ecs
, 0, sizeof (*ecs
));
3933 /* Events are always processed with the main UI as current UI. This
3934 way, warnings, debug output, etc. are always consistently sent to
3935 the main console. */
/* NOTE(review): "¤t_ui" below is a mojibake of "&current_ui"
   (the "&curren" byte run was decoded as the HTML entity for the
   currency sign) -- restore when repairing this file.  */
3936 scoped_restore save_ui
= make_scoped_restore (¤t_ui
, main_ui
);
3938 /* End up with readline processing input, if necessary. */
3940 SCOPE_EXIT
{ reinstall_readline_callback_handler_cleanup (); };
3942 /* We're handling a live event, so make sure we're doing live
3943 debugging. If we're looking at traceframes while the target is
3944 running, we're going to need to get back to that mode after
3945 handling the event. */
3946 gdb::optional
<scoped_restore_current_traceframe
> maybe_restore_traceframe
;
3949 maybe_restore_traceframe
.emplace ();
3950 set_current_traceframe (-1);
3953 /* The user/frontend should not notice a thread switch due to
3954 internal events. Make sure we revert to the user selected
3955 thread and frame after handling the event and running any
3956 breakpoint commands. */
3957 scoped_restore_current_thread restore_thread
;
3959 overlay_cache_invalid
= 1;
3960 /* Flush target cache before starting to handle each event. Target
3961 was running and cache could be stale. This is just a heuristic.
3962 Running threads may modify target memory, but we don't get any
3964 target_dcache_invalidate ();
3966 scoped_restore save_exec_dir
3967 = make_scoped_restore (&execution_direction
,
3968 target_execution_direction ());
/* Non-blocking poll; if no event is ready, bail out and let the
   event loop call us again.  */
3970 if (!do_target_wait (minus_one_ptid
, ecs
, TARGET_WNOHANG
))
3973 gdb_assert (ecs
->ws
.kind
!= TARGET_WAITKIND_IGNORE
);
3975 /* Switch to the target that generated the event, so we can do
3976 target calls. Any inferior bound to the target will do, so we
3977 just switch to the first we find. */
3978 for (inferior
*inf
: all_inferiors (ecs
->target
))
3980 switch_to_inferior_no_thread (inf
);
3985 print_target_wait_results (minus_one_ptid
, ecs
->ptid
, &ecs
->ws
);
3987 /* If an error happens while handling the event, propagate GDB's
3988 knowledge of the executing state to the frontend/user running
3990 ptid_t finish_ptid
= !target_is_non_stop_p () ? minus_one_ptid
: ecs
->ptid
;
3991 scoped_finish_thread_state
finish_state (ecs
->target
, finish_ptid
);
3993 /* Get executed before scoped_restore_current_thread above to apply
3994 still for the thread which has thrown the exception. */
3995 auto defer_bpstat_clear
3996 = make_scope_exit (bpstat_clear_actions
);
3997 auto defer_delete_threads
3998 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints
);
4000 /* Now figure out what to do with the result of the result. */
4001 handle_inferior_event (ecs
);
4003 if (!ecs
->wait_some_more
)
4005 struct inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
4006 int should_stop
= 1;
4007 struct thread_info
*thr
= ecs
->event_thread
;
4009 delete_just_stopped_threads_infrun_breakpoints ();
4013 struct thread_fsm
*thread_fsm
= thr
->thread_fsm
;
4015 if (thread_fsm
!= NULL
)
4016 should_stop
= thread_fsm
->should_stop (thr
);
4025 bool should_notify_stop
= true;
4028 clean_up_just_stopped_threads_fsms (ecs
);
4030 if (thr
!= NULL
&& thr
->thread_fsm
!= NULL
)
4031 should_notify_stop
= thr
->thread_fsm
->should_notify_stop ();
4033 if (should_notify_stop
)
4035 /* We may not find an inferior if this was a process exit. */
4036 if (inf
== NULL
|| inf
->control
.stop_soon
== NO_STOP_QUIETLY
)
4037 proceeded
= normal_stop ();
4042 inferior_event_handler (INF_EXEC_COMPLETE
);
4046 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4047 previously selected thread is gone. We have two
4048 choices - switch to no thread selected, or restore the
4049 previously selected thread (now exited). We chose the
4050 later, just because that's what GDB used to do. After
4051 this, "info threads" says "The current thread <Thread
4052 ID 2> has terminated." instead of "No thread
4056 && ecs
->ws
.kind
!= TARGET_WAITKIND_NO_RESUMED
)
4057 restore_thread
.dont_restore ();
/* Success: defuse the error-path scope exits so breakpoint actions
   and thread state survive into the stop presentation.  */
4061 defer_delete_threads
.release ();
4062 defer_bpstat_clear
.release ();
4064 /* No error, don't finish the thread states yet. */
4065 finish_state
.release ();
4067 /* This scope is used to ensure that readline callbacks are
4068 reinstalled here. */
4071 /* If a UI was in sync execution mode, and now isn't, restore its
4072 prompt (a synchronous execution command has finished, and we're
4073 ready for input). */
4074 all_uis_check_sync_execution_done ();
4077 && exec_done_display_p
4078 && (inferior_ptid
== null_ptid
4079 || inferior_thread ()->state
!= THREAD_RUNNING
))
4080 printf_unfiltered (_("completed.\n"));
4086 set_step_info (thread_info
*tp
, struct frame_info
*frame
,
4087 struct symtab_and_line sal
)
4089 /* This can be removed once this function no longer implicitly relies on the
4090 inferior_ptid value. */
4091 gdb_assert (inferior_ptid
== tp
->ptid
);
4093 tp
->control
.step_frame_id
= get_frame_id (frame
);
4094 tp
->control
.step_stack_frame_id
= get_stack_frame_id (frame
);
4096 tp
->current_symtab
= sal
.symtab
;
4097 tp
->current_line
= sal
.line
;
4100 /* Clear context switchable stepping state. */
4103 init_thread_stepping_state (struct thread_info
*tss
)
4105 tss
->stepped_breakpoint
= 0;
4106 tss
->stepping_over_breakpoint
= 0;
4107 tss
->stepping_over_watchpoint
= 0;
4108 tss
->step_after_step_resume_breakpoint
= 0;
4114 set_last_target_status (process_stratum_target
*target
, ptid_t ptid
,
4115 target_waitstatus status
)
4117 target_last_proc_target
= target
;
4118 target_last_wait_ptid
= ptid
;
4119 target_last_waitstatus
= status
;
4125 get_last_target_status (process_stratum_target
**target
, ptid_t
*ptid
,
4126 target_waitstatus
*status
)
4128 if (target
!= nullptr)
4129 *target
= target_last_proc_target
;
4130 if (ptid
!= nullptr)
4131 *ptid
= target_last_wait_ptid
;
4132 if (status
!= nullptr)
4133 *status
= target_last_waitstatus
;
4139 nullify_last_target_wait_ptid (void)
4141 target_last_proc_target
= nullptr;
4142 target_last_wait_ptid
= minus_one_ptid
;
4143 target_last_waitstatus
= {};
4146 /* Switch thread contexts. */
/* NOTE(review): extraction-damaged text -- the opening of the guard
   below (presumably "if (debug_infrun", matching the debug-logging
   pattern used elsewhere in this file) and the braces are missing;
   confirm against the upstream file.  Makes ECS's event thread the
   current thread, logging the switch when infrun debugging is on.  */
4149 context_switch (execution_control_state
*ecs
)
4152 && ecs
->ptid
!= inferior_ptid
4153 && (inferior_ptid
== null_ptid
4154 || ecs
->event_thread
!= inferior_thread ()))
4156 fprintf_unfiltered (gdb_stdlog
, "infrun: Switching context from %s ",
4157 target_pid_to_str (inferior_ptid
).c_str ());
4158 fprintf_unfiltered (gdb_stdlog
, "to %s\n",
4159 target_pid_to_str (ecs
->ptid
).c_str ());
4162 switch_to_thread (ecs
->event_thread
);
4165 /* If the target can't tell whether we've hit breakpoints
4166 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4167 check whether that could have been caused by a breakpoint. If so,
4168 adjust the PC, per gdbarch_decr_pc_after_break. */
4171 adjust_pc_after_break (struct thread_info
*thread
,
4172 struct target_waitstatus
*ws
)
4174 struct regcache
*regcache
;
4175 struct gdbarch
*gdbarch
;
4176 CORE_ADDR breakpoint_pc
, decr_pc
;
4178 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4179 we aren't, just return.
4181 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
4182 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4183 implemented by software breakpoints should be handled through the normal
4186 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4187 different signals (SIGILL or SIGEMT for instance), but it is less
4188 clear where the PC is pointing afterwards. It may not match
4189 gdbarch_decr_pc_after_break. I don't know any specific target that
4190 generates these signals at breakpoints (the code has been in GDB since at
4191 least 1992) so I can not guess how to handle them here.
4193 In earlier versions of GDB, a target with
4194 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
4195 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4196 target with both of these set in GDB history, and it seems unlikely to be
4197 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4199 if (ws
->kind
!= TARGET_WAITKIND_STOPPED
)
4202 if (ws
->value
.sig
!= GDB_SIGNAL_TRAP
)
4205 /* In reverse execution, when a breakpoint is hit, the instruction
4206 under it has already been de-executed. The reported PC always
4207 points at the breakpoint address, so adjusting it further would
4208 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4211 B1 0x08000000 : INSN1
4212 B2 0x08000001 : INSN2
4214 PC -> 0x08000003 : INSN4
4216 Say you're stopped at 0x08000003 as above. Reverse continuing
4217 from that point should hit B2 as below. Reading the PC when the
4218 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4219 been de-executed already.
4221 B1 0x08000000 : INSN1
4222 B2 PC -> 0x08000001 : INSN2
4226 We can't apply the same logic as for forward execution, because
4227 we would wrongly adjust the PC to 0x08000000, since there's a
4228 breakpoint at PC - 1. We'd then report a hit on B1, although
4229 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4231 if (execution_direction
== EXEC_REVERSE
)
4234 /* If the target can tell whether the thread hit a SW breakpoint,
4235 trust it. Targets that can tell also adjust the PC
4237 if (target_supports_stopped_by_sw_breakpoint ())
4240 /* Note that relying on whether a breakpoint is planted in memory to
4241 determine this can fail. E.g,. the breakpoint could have been
4242 removed since. Or the thread could have been told to step an
4243 instruction the size of a breakpoint instruction, and only
4244 _after_ was a breakpoint inserted at its address. */
4246 /* If this target does not decrement the PC after breakpoints, then
4247 we have nothing to do. */
4248 regcache
= get_thread_regcache (thread
);
4249 gdbarch
= regcache
->arch ();
4251 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
4255 const address_space
*aspace
= regcache
->aspace ();
4257 /* Find the location where (if we've hit a breakpoint) the
4258 breakpoint would be. */
4259 breakpoint_pc
= regcache_read_pc (regcache
) - decr_pc
;
4261 /* If the target can't tell whether a software breakpoint triggered,
4262 fallback to figuring it out based on breakpoints we think were
4263 inserted in the target, and on whether the thread was stepped or
4266 /* Check whether there actually is a software breakpoint inserted at
4269 If in non-stop mode, a race condition is possible where we've
4270 removed a breakpoint, but stop events for that breakpoint were
4271 already queued and arrive later. To suppress those spurious
4272 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
4273 and retire them after a number of stop events are reported. Note
4274 this is an heuristic and can thus get confused. The real fix is
4275 to get the "stopped by SW BP and needs adjustment" info out of
4276 the target/kernel (and thus never reach here; see above). */
4277 if (software_breakpoint_inserted_here_p (aspace
, breakpoint_pc
)
4278 || (target_is_non_stop_p ()
4279 && moribund_breakpoint_here_p (aspace
, breakpoint_pc
)))
4281 gdb::optional
<scoped_restore_tmpl
<int>> restore_operation_disable
;
4283 if (record_full_is_used ())
4284 restore_operation_disable
.emplace
4285 (record_full_gdb_operation_disable_set ());
4287 /* When using hardware single-step, a SIGTRAP is reported for both
4288 a completed single-step and a software breakpoint. Need to
4289 differentiate between the two, as the latter needs adjusting
4290 but the former does not.
4292 The SIGTRAP can be due to a completed hardware single-step only if
4293 - we didn't insert software single-step breakpoints
4294 - this thread is currently being stepped
4296 If any of these events did not occur, we must have stopped due
4297 to hitting a software breakpoint, and have to back up to the
4300 As a special case, we could have hardware single-stepped a
4301 software breakpoint. In this case (prev_pc == breakpoint_pc),
4302 we also need to back up to the breakpoint address. */
4304 if (thread_has_single_step_breakpoints_set (thread
)
4305 || !currently_stepping (thread
)
4306 || (thread
->stepped_breakpoint
4307 && thread
->prev_pc
== breakpoint_pc
))
4308 regcache_write_pc (regcache
, breakpoint_pc
);
4313 stepped_in_from (struct frame_info
*frame
, struct frame_id step_frame_id
)
4315 for (frame
= get_prev_frame (frame
);
4317 frame
= get_prev_frame (frame
))
4319 if (frame_id_eq (get_frame_id (frame
), step_frame_id
))
4321 if (get_frame_type (frame
) != INLINE_FRAME
)
4328 /* Look for an inline frame that is marked for skip.
4329 If PREV_FRAME is TRUE start at the previous frame,
4330 otherwise start at the current frame. Stop at the
4331 first non-inline frame, or at the frame where the
4335 inline_frame_is_marked_for_skip (bool prev_frame
, struct thread_info
*tp
)
4337 struct frame_info
*frame
= get_current_frame ();
4340 frame
= get_prev_frame (frame
);
4342 for (; frame
!= NULL
; frame
= get_prev_frame (frame
))
4344 const char *fn
= NULL
;
4345 symtab_and_line sal
;
4348 if (frame_id_eq (get_frame_id (frame
), tp
->control
.step_frame_id
))
4350 if (get_frame_type (frame
) != INLINE_FRAME
)
4353 sal
= find_frame_sal (frame
);
4354 sym
= get_frame_function (frame
);
4357 fn
= sym
->print_name ();
4360 && function_name_is_marked_for_skip (fn
, sal
))
4367 /* If the event thread has the stop requested flag set, pretend it
4368 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4372 handle_stop_requested (struct execution_control_state
*ecs
)
4374 if (ecs
->event_thread
->stop_requested
)
4376 ecs
->ws
.kind
= TARGET_WAITKIND_STOPPED
;
4377 ecs
->ws
.value
.sig
= GDB_SIGNAL_0
;
4378 handle_signal_stop (ecs
);
4384 /* Auxiliary function that handles syscall entry/return events.
4385 It returns 1 if the inferior should keep going (and GDB
4386 should ignore the event), or 0 if the event deserves to be
4390 handle_syscall_event (struct execution_control_state
*ecs
)
4392 struct regcache
*regcache
;
4395 context_switch (ecs
);
4397 regcache
= get_thread_regcache (ecs
->event_thread
);
4398 syscall_number
= ecs
->ws
.value
.syscall_number
;
4399 ecs
->event_thread
->suspend
.stop_pc
= regcache_read_pc (regcache
);
4401 if (catch_syscall_enabled () > 0
4402 && catching_syscall_number (syscall_number
) > 0)
4405 fprintf_unfiltered (gdb_stdlog
, "infrun: syscall number = '%d'\n",
4408 ecs
->event_thread
->control
.stop_bpstat
4409 = bpstat_stop_status (regcache
->aspace (),
4410 ecs
->event_thread
->suspend
.stop_pc
,
4411 ecs
->event_thread
, &ecs
->ws
);
4413 if (handle_stop_requested (ecs
))
4416 if (bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
4418 /* Catchpoint hit. */
4423 if (handle_stop_requested (ecs
))
4426 /* If no catchpoint triggered for this, then keep going. */
4431 /* Lazily fill in the execution_control_state's stop_func_* fields. */
4434 fill_in_stop_func (struct gdbarch
*gdbarch
,
4435 struct execution_control_state
*ecs
)
4437 if (!ecs
->stop_func_filled_in
)
4441 /* Don't care about return value; stop_func_start and stop_func_name
4442 will both be 0 if it doesn't work. */
4443 find_pc_partial_function (ecs
->event_thread
->suspend
.stop_pc
,
4444 &ecs
->stop_func_name
,
4445 &ecs
->stop_func_start
,
4446 &ecs
->stop_func_end
,
4449 /* The call to find_pc_partial_function, above, will set
4450 stop_func_start and stop_func_end to the start and end
4451 of the range containing the stop pc. If this range
4452 contains the entry pc for the block (which is always the
4453 case for contiguous blocks), advance stop_func_start past
4454 the function's start offset and entrypoint. Note that
4455 stop_func_start is NOT advanced when in a range of a
4456 non-contiguous block that does not contain the entry pc. */
4457 if (block
!= nullptr
4458 && ecs
->stop_func_start
<= BLOCK_ENTRY_PC (block
)
4459 && BLOCK_ENTRY_PC (block
) < ecs
->stop_func_end
)
4461 ecs
->stop_func_start
4462 += gdbarch_deprecated_function_start_offset (gdbarch
);
4464 if (gdbarch_skip_entrypoint_p (gdbarch
))
4465 ecs
->stop_func_start
4466 = gdbarch_skip_entrypoint (gdbarch
, ecs
->stop_func_start
);
4469 ecs
->stop_func_filled_in
= 1;
4474 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
4476 static enum stop_kind
4477 get_inferior_stop_soon (execution_control_state
*ecs
)
4479 struct inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
4481 gdb_assert (inf
!= NULL
);
4482 return inf
->control
.stop_soon
;
4485 /* Poll for one event out of the current target. Store the resulting
4486 waitstatus in WS, and return the event ptid. Does not block. */
4489 poll_one_curr_target (struct target_waitstatus
*ws
)
4493 overlay_cache_invalid
= 1;
4495 /* Flush target cache before starting to handle each event.
4496 Target was running and cache could be stale. This is just a
4497 heuristic. Running threads may modify target memory, but we
4498 don't get any event. */
4499 target_dcache_invalidate ();
4501 if (deprecated_target_wait_hook
)
4502 event_ptid
= deprecated_target_wait_hook (minus_one_ptid
, ws
, TARGET_WNOHANG
);
4504 event_ptid
= target_wait (minus_one_ptid
, ws
, TARGET_WNOHANG
);
4507 print_target_wait_results (minus_one_ptid
, event_ptid
, ws
);
4512 /* An event reported by wait_one. */
4514 struct wait_one_event
4516 /* The target the event came out of. */
4517 process_stratum_target
*target
;
4519 /* The PTID the event was for. */
4522 /* The waitstatus. */
4523 target_waitstatus ws
;
4526 /* Wait for one event out of any target. */
4528 static wait_one_event
4533 for (inferior
*inf
: all_inferiors ())
4535 process_stratum_target
*target
= inf
->process_target ();
4537 || !target
->is_async_p ()
4538 || !target
->threads_executing
)
4541 switch_to_inferior_no_thread (inf
);
4543 wait_one_event event
;
4544 event
.target
= target
;
4545 event
.ptid
= poll_one_curr_target (&event
.ws
);
4547 if (event
.ws
.kind
== TARGET_WAITKIND_NO_RESUMED
)
4549 /* If nothing is resumed, remove the target from the
4553 else if (event
.ws
.kind
!= TARGET_WAITKIND_IGNORE
)
4557 /* Block waiting for some event. */
4564 for (inferior
*inf
: all_inferiors ())
4566 process_stratum_target
*target
= inf
->process_target ();
4568 || !target
->is_async_p ()
4569 || !target
->threads_executing
)
4572 int fd
= target
->async_wait_fd ();
4573 FD_SET (fd
, &readfds
);
4580 /* No waitable targets left. All must be stopped. */
4581 return {NULL
, minus_one_ptid
, {TARGET_WAITKIND_NO_RESUMED
}};
4586 int numfds
= interruptible_select (nfds
, &readfds
, 0, NULL
, 0);
4592 perror_with_name ("interruptible_select");
4597 /* Save the thread's event and stop reason to process it later. */
4600 save_waitstatus (struct thread_info
*tp
, const target_waitstatus
*ws
)
4604 std::string statstr
= target_waitstatus_to_string (ws
);
4606 fprintf_unfiltered (gdb_stdlog
,
4607 "infrun: saving status %s for %d.%ld.%ld\n",
4614 /* Record for later. */
4615 tp
->suspend
.waitstatus
= *ws
;
4616 tp
->suspend
.waitstatus_pending_p
= 1;
4618 struct regcache
*regcache
= get_thread_regcache (tp
);
4619 const address_space
*aspace
= regcache
->aspace ();
4621 if (ws
->kind
== TARGET_WAITKIND_STOPPED
4622 && ws
->value
.sig
== GDB_SIGNAL_TRAP
)
4624 CORE_ADDR pc
= regcache_read_pc (regcache
);
4626 adjust_pc_after_break (tp
, &tp
->suspend
.waitstatus
);
4628 scoped_restore_current_thread restore_thread
;
4629 switch_to_thread (tp
);
4631 if (target_stopped_by_watchpoint ())
4633 tp
->suspend
.stop_reason
4634 = TARGET_STOPPED_BY_WATCHPOINT
;
4636 else if (target_supports_stopped_by_sw_breakpoint ()
4637 && target_stopped_by_sw_breakpoint ())
4639 tp
->suspend
.stop_reason
4640 = TARGET_STOPPED_BY_SW_BREAKPOINT
;
4642 else if (target_supports_stopped_by_hw_breakpoint ()
4643 && target_stopped_by_hw_breakpoint ())
4645 tp
->suspend
.stop_reason
4646 = TARGET_STOPPED_BY_HW_BREAKPOINT
;
4648 else if (!target_supports_stopped_by_hw_breakpoint ()
4649 && hardware_breakpoint_inserted_here_p (aspace
,
4652 tp
->suspend
.stop_reason
4653 = TARGET_STOPPED_BY_HW_BREAKPOINT
;
4655 else if (!target_supports_stopped_by_sw_breakpoint ()
4656 && software_breakpoint_inserted_here_p (aspace
,
4659 tp
->suspend
.stop_reason
4660 = TARGET_STOPPED_BY_SW_BREAKPOINT
;
4662 else if (!thread_has_single_step_breakpoints_set (tp
)
4663 && currently_stepping (tp
))
4665 tp
->suspend
.stop_reason
4666 = TARGET_STOPPED_BY_SINGLE_STEP
;
4671 /* Mark the non-executing threads accordingly. In all-stop, all
4672 threads of all processes are stopped when we get any event
4673 reported. In non-stop mode, only the event thread stops. */
4676 mark_non_executing_threads (process_stratum_target
*target
,
4678 struct target_waitstatus ws
)
4682 if (!target_is_non_stop_p ())
4683 mark_ptid
= minus_one_ptid
;
4684 else if (ws
.kind
== TARGET_WAITKIND_SIGNALLED
4685 || ws
.kind
== TARGET_WAITKIND_EXITED
)
4687 /* If we're handling a process exit in non-stop mode, even
4688 though threads haven't been deleted yet, one would think
4689 that there is nothing to do, as threads of the dead process
4690 will be soon deleted, and threads of any other process were
4691 left running. However, on some targets, threads survive a
4692 process exit event. E.g., for the "checkpoint" command,
4693 when the current checkpoint/fork exits, linux-fork.c
4694 automatically switches to another fork from within
4695 target_mourn_inferior, by associating the same
4696 inferior/thread to another fork. We haven't mourned yet at
4697 this point, but we must mark any threads left in the
4698 process as not-executing so that finish_thread_state marks
4699 them stopped (in the user's perspective) if/when we present
4700 the stop to the user. */
4701 mark_ptid
= ptid_t (event_ptid
.pid ());
4704 mark_ptid
= event_ptid
;
4706 set_executing (target
, mark_ptid
, false);
4708 /* Likewise the resumed flag. */
4709 set_resumed (target
, mark_ptid
, false);
4715 stop_all_threads (void)
4717 /* We may need multiple passes to discover all threads. */
4721 gdb_assert (exists_non_stop_target ());
4724 fprintf_unfiltered (gdb_stdlog
, "infrun: stop_all_threads\n");
4726 scoped_restore_current_thread restore_thread
;
4728 /* Enable thread events of all targets. */
4729 for (auto *target
: all_non_exited_process_targets ())
4731 switch_to_target_no_thread (target
);
4732 target_thread_events (true);
4737 /* Disable thread events of all targets. */
4738 for (auto *target
: all_non_exited_process_targets ())
4740 switch_to_target_no_thread (target
);
4741 target_thread_events (false);
4745 fprintf_unfiltered (gdb_stdlog
, "infrun: stop_all_threads done\n");
4748 /* Request threads to stop, and then wait for the stops. Because
4749 threads we already know about can spawn more threads while we're
4750 trying to stop them, and we only learn about new threads when we
4751 update the thread list, do this in a loop, and keep iterating
4752 until two passes find no threads that need to be stopped. */
4753 for (pass
= 0; pass
< 2; pass
++, iterations
++)
4756 fprintf_unfiltered (gdb_stdlog
,
4757 "infrun: stop_all_threads, pass=%d, "
4758 "iterations=%d\n", pass
, iterations
);
4761 int waits_needed
= 0;
4763 for (auto *target
: all_non_exited_process_targets ())
4765 switch_to_target_no_thread (target
);
4766 update_thread_list ();
4769 /* Go through all threads looking for threads that we need
4770 to tell the target to stop. */
4771 for (thread_info
*t
: all_non_exited_threads ())
4773 /* For a single-target setting with an all-stop target,
4774 we would not even arrive here. For a multi-target
4775 setting, until GDB is able to handle a mixture of
4776 all-stop and non-stop targets, simply skip all-stop
4777 targets' threads. This should be fine due to the
4778 protection of 'check_multi_target_resumption'. */
4780 switch_to_thread_no_regs (t
);
4781 if (!target_is_non_stop_p ())
4786 /* If already stopping, don't request a stop again.
4787 We just haven't seen the notification yet. */
4788 if (!t
->stop_requested
)
4791 fprintf_unfiltered (gdb_stdlog
,
4792 "infrun: %s executing, "
4794 target_pid_to_str (t
->ptid
).c_str ());
4795 target_stop (t
->ptid
);
4796 t
->stop_requested
= 1;
4801 fprintf_unfiltered (gdb_stdlog
,
4802 "infrun: %s executing, "
4803 "already stopping\n",
4804 target_pid_to_str (t
->ptid
).c_str ());
4807 if (t
->stop_requested
)
4813 fprintf_unfiltered (gdb_stdlog
,
4814 "infrun: %s not executing\n",
4815 target_pid_to_str (t
->ptid
).c_str ());
4817 /* The thread may be not executing, but still be
4818 resumed with a pending status to process. */
4823 if (waits_needed
== 0)
4826 /* If we find new threads on the second iteration, restart
4827 over. We want to see two iterations in a row with all
4832 for (int i
= 0; i
< waits_needed
; i
++)
4834 wait_one_event event
= wait_one ();
4838 fprintf_unfiltered (gdb_stdlog
,
4839 "infrun: stop_all_threads %s %s\n",
4840 target_waitstatus_to_string (&event
.ws
).c_str (),
4841 target_pid_to_str (event
.ptid
).c_str ());
4844 if (event
.ws
.kind
== TARGET_WAITKIND_NO_RESUMED
)
4846 /* All resumed threads exited. */
4849 else if (event
.ws
.kind
== TARGET_WAITKIND_THREAD_EXITED
4850 || event
.ws
.kind
== TARGET_WAITKIND_EXITED
4851 || event
.ws
.kind
== TARGET_WAITKIND_SIGNALLED
)
4853 /* One thread/process exited/signalled. */
4855 thread_info
*t
= nullptr;
4857 /* The target may have reported just a pid. If so, try
4858 the first non-exited thread. */
4859 if (event
.ptid
.is_pid ())
4861 int pid
= event
.ptid
.pid ();
4862 inferior
*inf
= find_inferior_pid (event
.target
, pid
);
4863 for (thread_info
*tp
: inf
->non_exited_threads ())
4869 /* If there is no available thread, the event would
4870 have to be appended to a per-inferior event list,
4871 which does not exist (and if it did, we'd have
4872 to adjust run control command to be able to
4873 resume such an inferior). We assert here instead
4874 of going into an infinite loop. */
4875 gdb_assert (t
!= nullptr);
4878 fprintf_unfiltered (gdb_stdlog
,
4879 "infrun: stop_all_threads, using %s\n",
4880 target_pid_to_str (t
->ptid
).c_str ());
4884 t
= find_thread_ptid (event
.target
, event
.ptid
);
4885 /* Check if this is the first time we see this thread.
4886 Don't bother adding if it individually exited. */
4888 && event
.ws
.kind
!= TARGET_WAITKIND_THREAD_EXITED
)
4889 t
= add_thread (event
.target
, event
.ptid
);
4894 /* Set the threads as non-executing to avoid
4895 another stop attempt on them. */
4896 switch_to_thread_no_regs (t
);
4897 mark_non_executing_threads (event
.target
, event
.ptid
,
4899 save_waitstatus (t
, &event
.ws
);
4900 t
->stop_requested
= false;
4905 thread_info
*t
= find_thread_ptid (event
.target
, event
.ptid
);
4907 t
= add_thread (event
.target
, event
.ptid
);
4909 t
->stop_requested
= 0;
4912 t
->control
.may_range_step
= 0;
4914 /* This may be the first time we see the inferior report
4916 inferior
*inf
= find_inferior_ptid (event
.target
, event
.ptid
);
4917 if (inf
->needs_setup
)
4919 switch_to_thread_no_regs (t
);
4923 if (event
.ws
.kind
== TARGET_WAITKIND_STOPPED
4924 && event
.ws
.value
.sig
== GDB_SIGNAL_0
)
4926 /* We caught the event that we intended to catch, so
4927 there's no event pending. */
4928 t
->suspend
.waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
4929 t
->suspend
.waitstatus_pending_p
= 0;
4931 if (displaced_step_fixup (t
, GDB_SIGNAL_0
) < 0)
4933 /* Add it back to the step-over queue. */
4936 fprintf_unfiltered (gdb_stdlog
,
4937 "infrun: displaced-step of %s "
4938 "canceled: adding back to the "
4939 "step-over queue\n",
4940 target_pid_to_str (t
->ptid
).c_str ());
4942 t
->control
.trap_expected
= 0;
4943 thread_step_over_chain_enqueue (t
);
4948 enum gdb_signal sig
;
4949 struct regcache
*regcache
;
4953 std::string statstr
= target_waitstatus_to_string (&event
.ws
);
4955 fprintf_unfiltered (gdb_stdlog
,
4956 "infrun: target_wait %s, saving "
4957 "status for %d.%ld.%ld\n",
4964 /* Record for later. */
4965 save_waitstatus (t
, &event
.ws
);
4967 sig
= (event
.ws
.kind
== TARGET_WAITKIND_STOPPED
4968 ? event
.ws
.value
.sig
: GDB_SIGNAL_0
);
4970 if (displaced_step_fixup (t
, sig
) < 0)
4972 /* Add it back to the step-over queue. */
4973 t
->control
.trap_expected
= 0;
4974 thread_step_over_chain_enqueue (t
);
4977 regcache
= get_thread_regcache (t
);
4978 t
->suspend
.stop_pc
= regcache_read_pc (regcache
);
4982 fprintf_unfiltered (gdb_stdlog
,
4983 "infrun: saved stop_pc=%s for %s "
4984 "(currently_stepping=%d)\n",
4985 paddress (target_gdbarch (),
4986 t
->suspend
.stop_pc
),
4987 target_pid_to_str (t
->ptid
).c_str (),
4988 currently_stepping (t
));
4997 /* Handle a TARGET_WAITKIND_NO_RESUMED event. */
5000 handle_no_resumed (struct execution_control_state
*ecs
)
5002 if (target_can_async_p ())
5006 for (ui
*ui
: all_uis ())
5008 if (ui
->prompt_state
== PROMPT_BLOCKED
)
5016 /* There were no unwaited-for children left in the target, but,
5017 we're not synchronously waiting for events either. Just
5021 fprintf_unfiltered (gdb_stdlog
,
5022 "infrun: TARGET_WAITKIND_NO_RESUMED "
5023 "(ignoring: bg)\n");
5024 prepare_to_wait (ecs
);
5029 /* Otherwise, if we were running a synchronous execution command, we
5030 may need to cancel it and give the user back the terminal.
5032 In non-stop mode, the target can't tell whether we've already
5033 consumed previous stop events, so it can end up sending us a
5034 no-resumed event like so:
5036 #0 - thread 1 is left stopped
5038 #1 - thread 2 is resumed and hits breakpoint
5039 -> TARGET_WAITKIND_STOPPED
5041 #2 - thread 3 is resumed and exits
5042 this is the last resumed thread, so
5043 -> TARGET_WAITKIND_NO_RESUMED
5045 #3 - gdb processes stop for thread 2 and decides to re-resume
5048 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
5049 thread 2 is now resumed, so the event should be ignored.
5051 IOW, if the stop for thread 2 doesn't end a foreground command,
5052 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
5053 event. But it could be that the event meant that thread 2 itself
5054 (or whatever other thread was the last resumed thread) exited.
5056 To address this we refresh the thread list and check whether we
5057 have resumed threads _now_. In the example above, this removes
5058 thread 3 from the thread list. If thread 2 was re-resumed, we
5059 ignore this event. If we find no thread resumed, then we cancel
5060 the synchronous command and show "no unwaited-for " to the
5063 inferior
*curr_inf
= current_inferior ();
5065 scoped_restore_current_thread restore_thread
;
5067 for (auto *target
: all_non_exited_process_targets ())
5069 switch_to_target_no_thread (target
);
5070 update_thread_list ();
5075 - the current target has no thread executing, and
5076 - the current inferior is native, and
5077 - the current inferior is the one which has the terminal, and
5080 then a Ctrl-C from this point on would remain stuck in the
5081 kernel, until a thread resumes and dequeues it. That would
5082 result in the GDB CLI not reacting to Ctrl-C, not able to
5083 interrupt the program. To address this, if the current inferior
5084 no longer has any thread executing, we give the terminal to some
5085 other inferior that has at least one thread executing. */
5086 bool swap_terminal
= true;
5088 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
5089 whether to report it to the user. */
5090 bool ignore_event
= false;
5092 for (thread_info
*thread
: all_non_exited_threads ())
5094 if (swap_terminal
&& thread
->executing
)
5096 if (thread
->inf
!= curr_inf
)
5098 target_terminal::ours ();
5100 switch_to_thread (thread
);
5101 target_terminal::inferior ();
5103 swap_terminal
= false;
5107 && (thread
->executing
5108 || thread
->suspend
.waitstatus_pending_p
))
5110 /* Either there were no unwaited-for children left in the
5111 target at some point, but there are now, or some target
5112 other than the eventing one has unwaited-for children
5113 left. Just ignore. */
5115 fprintf_unfiltered (gdb_stdlog
,
5116 "infrun: TARGET_WAITKIND_NO_RESUMED "
5117 "(ignoring: found resumed)\n");
5119 ignore_event
= true;
5122 if (ignore_event
&& !swap_terminal
)
5128 switch_to_inferior_no_thread (curr_inf
);
5129 prepare_to_wait (ecs
);
5133 /* Go ahead and report the event. */
5137 /* Given an execution control state that has been freshly filled in by
5138 an event from the inferior, figure out what it means and take
5141 The alternatives are:
5143 1) stop_waiting and return; to really stop and return to the
5146 2) keep_going and return; to wait for the next event (set
5147 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5151 handle_inferior_event (struct execution_control_state
*ecs
)
5153 /* Make sure that all temporary struct value objects that were
5154 created during the handling of the event get deleted at the
5156 scoped_value_mark free_values
;
5158 enum stop_kind stop_soon
;
5161 fprintf_unfiltered (gdb_stdlog
, "infrun: handle_inferior_event %s\n",
5162 target_waitstatus_to_string (&ecs
->ws
).c_str ());
5164 if (ecs
->ws
.kind
== TARGET_WAITKIND_IGNORE
)
5166 /* We had an event in the inferior, but we are not interested in
5167 handling it at this level. The lower layers have already
5168 done what needs to be done, if anything.
5170 One of the possible circumstances for this is when the
5171 inferior produces output for the console. The inferior has
5172 not stopped, and we are ignoring the event. Another possible
5173 circumstance is any event which the lower level knows will be
5174 reported multiple times without an intervening resume. */
5175 prepare_to_wait (ecs
);
5179 if (ecs
->ws
.kind
== TARGET_WAITKIND_THREAD_EXITED
)
5181 prepare_to_wait (ecs
);
5185 if (ecs
->ws
.kind
== TARGET_WAITKIND_NO_RESUMED
5186 && handle_no_resumed (ecs
))
5189 /* Cache the last target/ptid/waitstatus. */
5190 set_last_target_status (ecs
->target
, ecs
->ptid
, ecs
->ws
);
5192 /* Always clear state belonging to the previous time we stopped. */
5193 stop_stack_dummy
= STOP_NONE
;
5195 if (ecs
->ws
.kind
== TARGET_WAITKIND_NO_RESUMED
)
5197 /* No unwaited-for children left. IOW, all resumed children
5199 stop_print_frame
= 0;
5204 if (ecs
->ws
.kind
!= TARGET_WAITKIND_EXITED
5205 && ecs
->ws
.kind
!= TARGET_WAITKIND_SIGNALLED
)
5207 ecs
->event_thread
= find_thread_ptid (ecs
->target
, ecs
->ptid
);
5208 /* If it's a new thread, add it to the thread database. */
5209 if (ecs
->event_thread
== NULL
)
5210 ecs
->event_thread
= add_thread (ecs
->target
, ecs
->ptid
);
5212 /* Disable range stepping. If the next step request could use a
5213 range, this will be end up re-enabled then. */
5214 ecs
->event_thread
->control
.may_range_step
= 0;
5217 /* Dependent on valid ECS->EVENT_THREAD. */
5218 adjust_pc_after_break (ecs
->event_thread
, &ecs
->ws
);
5220 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5221 reinit_frame_cache ();
5223 breakpoint_retire_moribund ();
5225 /* First, distinguish signals caused by the debugger from signals
5226 that have to do with the program's own actions. Note that
5227 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5228 on the operating system version. Here we detect when a SIGILL or
5229 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5230 something similar for SIGSEGV, since a SIGSEGV will be generated
5231 when we're trying to execute a breakpoint instruction on a
5232 non-executable stack. This happens for call dummy breakpoints
5233 for architectures like SPARC that place call dummies on the
5235 if (ecs
->ws
.kind
== TARGET_WAITKIND_STOPPED
5236 && (ecs
->ws
.value
.sig
== GDB_SIGNAL_ILL
5237 || ecs
->ws
.value
.sig
== GDB_SIGNAL_SEGV
5238 || ecs
->ws
.value
.sig
== GDB_SIGNAL_EMT
))
5240 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
5242 if (breakpoint_inserted_here_p (regcache
->aspace (),
5243 regcache_read_pc (regcache
)))
5246 fprintf_unfiltered (gdb_stdlog
,
5247 "infrun: Treating signal as SIGTRAP\n");
5248 ecs
->ws
.value
.sig
= GDB_SIGNAL_TRAP
;
5252 mark_non_executing_threads (ecs
->target
, ecs
->ptid
, ecs
->ws
);
5254 switch (ecs
->ws
.kind
)
5256 case TARGET_WAITKIND_LOADED
:
5257 context_switch (ecs
);
5258 /* Ignore gracefully during startup of the inferior, as it might
5259 be the shell which has just loaded some objects, otherwise
5260 add the symbols for the newly loaded objects. Also ignore at
5261 the beginning of an attach or remote session; we will query
5262 the full list of libraries once the connection is
5265 stop_soon
= get_inferior_stop_soon (ecs
);
5266 if (stop_soon
== NO_STOP_QUIETLY
)
5268 struct regcache
*regcache
;
5270 regcache
= get_thread_regcache (ecs
->event_thread
);
5272 handle_solib_event ();
5274 ecs
->event_thread
->control
.stop_bpstat
5275 = bpstat_stop_status (regcache
->aspace (),
5276 ecs
->event_thread
->suspend
.stop_pc
,
5277 ecs
->event_thread
, &ecs
->ws
);
5279 if (handle_stop_requested (ecs
))
5282 if (bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
5284 /* A catchpoint triggered. */
5285 process_event_stop_test (ecs
);
5289 /* If requested, stop when the dynamic linker notifies
5290 gdb of events. This allows the user to get control
5291 and place breakpoints in initializer routines for
5292 dynamically loaded objects (among other things). */
5293 ecs
->event_thread
->suspend
.stop_signal
= GDB_SIGNAL_0
;
5294 if (stop_on_solib_events
)
5296 /* Make sure we print "Stopped due to solib-event" in
5298 stop_print_frame
= 1;
5305 /* If we are skipping through a shell, or through shared library
5306 loading that we aren't interested in, resume the program. If
5307 we're running the program normally, also resume. */
5308 if (stop_soon
== STOP_QUIETLY
|| stop_soon
== NO_STOP_QUIETLY
)
5310 /* Loading of shared libraries might have changed breakpoint
5311 addresses. Make sure new breakpoints are inserted. */
5312 if (stop_soon
== NO_STOP_QUIETLY
)
5313 insert_breakpoints ();
5314 resume (GDB_SIGNAL_0
);
5315 prepare_to_wait (ecs
);
5319 /* But stop if we're attaching or setting up a remote
5321 if (stop_soon
== STOP_QUIETLY_NO_SIGSTOP
5322 || stop_soon
== STOP_QUIETLY_REMOTE
)
5325 fprintf_unfiltered (gdb_stdlog
, "infrun: quietly stopped\n");
5330 internal_error (__FILE__
, __LINE__
,
5331 _("unhandled stop_soon: %d"), (int) stop_soon
);
5333 case TARGET_WAITKIND_SPURIOUS
:
5334 if (handle_stop_requested (ecs
))
5336 context_switch (ecs
);
5337 resume (GDB_SIGNAL_0
);
5338 prepare_to_wait (ecs
);
5341 case TARGET_WAITKIND_THREAD_CREATED
:
5342 if (handle_stop_requested (ecs
))
5344 context_switch (ecs
);
5345 if (!switch_back_to_stepped_thread (ecs
))
5349 case TARGET_WAITKIND_EXITED
:
5350 case TARGET_WAITKIND_SIGNALLED
:
5352 /* Depending on the system, ecs->ptid may point to a thread or
5353 to a process. On some targets, target_mourn_inferior may
5354 need to have access to the just-exited thread. That is the
5355 case of GNU/Linux's "checkpoint" support, for example.
5356 Call the switch_to_xxx routine as appropriate. */
5357 thread_info
*thr
= find_thread_ptid (ecs
->target
, ecs
->ptid
);
5359 switch_to_thread (thr
);
5362 inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
5363 switch_to_inferior_no_thread (inf
);
5366 handle_vfork_child_exec_or_exit (0);
5367 target_terminal::ours (); /* Must do this before mourn anyway. */
5369 /* Clearing any previous state of convenience variables. */
5370 clear_exit_convenience_vars ();
5372 if (ecs
->ws
.kind
== TARGET_WAITKIND_EXITED
)
5374 /* Record the exit code in the convenience variable $_exitcode, so
5375 that the user can inspect this again later. */
5376 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5377 (LONGEST
) ecs
->ws
.value
.integer
);
5379 /* Also record this in the inferior itself. */
5380 current_inferior ()->has_exit_code
= 1;
5381 current_inferior ()->exit_code
= (LONGEST
) ecs
->ws
.value
.integer
;
5383 /* Support the --return-child-result option. */
5384 return_child_result_value
= ecs
->ws
.value
.integer
;
5386 gdb::observers::exited
.notify (ecs
->ws
.value
.integer
);
5390 struct gdbarch
*gdbarch
= current_inferior ()->gdbarch
;
5392 if (gdbarch_gdb_signal_to_target_p (gdbarch
))
5394 /* Set the value of the internal variable $_exitsignal,
5395 which holds the signal uncaught by the inferior. */
5396 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5397 gdbarch_gdb_signal_to_target (gdbarch
,
5398 ecs
->ws
.value
.sig
));
5402 /* We don't have access to the target's method used for
5403 converting between signal numbers (GDB's internal
5404 representation <-> target's representation).
5405 Therefore, we cannot do a good job at displaying this
5406 information to the user. It's better to just warn
5407 her about it (if infrun debugging is enabled), and
5410 fprintf_filtered (gdb_stdlog
, _("\
5411 Cannot fill $_exitsignal with the correct signal number.\n"));
5414 gdb::observers::signal_exited
.notify (ecs
->ws
.value
.sig
);
5417 gdb_flush (gdb_stdout
);
5418 target_mourn_inferior (inferior_ptid
);
5419 stop_print_frame
= 0;
5423 case TARGET_WAITKIND_FORKED
:
5424 case TARGET_WAITKIND_VFORKED
:
5425 /* Check whether the inferior is displaced stepping. */
5427 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
5428 struct gdbarch
*gdbarch
= regcache
->arch ();
5430 /* If checking displaced stepping is supported, and thread
5431 ecs->ptid is displaced stepping. */
5432 if (displaced_step_in_progress_thread (ecs
->event_thread
))
5434 struct inferior
*parent_inf
5435 = find_inferior_ptid (ecs
->target
, ecs
->ptid
);
5436 struct regcache
*child_regcache
;
5437 CORE_ADDR parent_pc
;
5439 if (ecs
->ws
.kind
== TARGET_WAITKIND_FORKED
)
5441 struct displaced_step_inferior_state
*displaced
5442 = get_displaced_stepping_state (parent_inf
);
5444 /* Restore scratch pad for child process. */
5445 displaced_step_restore (displaced
, ecs
->ws
.value
.related_pid
);
5448 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5449 indicating that the displaced stepping of syscall instruction
5450 has been done. Perform cleanup for parent process here. Note
5451 that this operation also cleans up the child process for vfork,
5452 because their pages are shared. */
5453 displaced_step_fixup (ecs
->event_thread
, GDB_SIGNAL_TRAP
);
5454 /* Start a new step-over in another thread if there's one
5458 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5459 the child's PC is also within the scratchpad. Set the child's PC
5460 to the parent's PC value, which has already been fixed up.
5461 FIXME: we use the parent's aspace here, although we're touching
5462 the child, because the child hasn't been added to the inferior
5463 list yet at this point. */
5466 = get_thread_arch_aspace_regcache (parent_inf
->process_target (),
5467 ecs
->ws
.value
.related_pid
,
5469 parent_inf
->aspace
);
5470 /* Read PC value of parent process. */
5471 parent_pc
= regcache_read_pc (regcache
);
5473 if (debug_displaced
)
5474 fprintf_unfiltered (gdb_stdlog
,
5475 "displaced: write child pc from %s to %s\n",
5477 regcache_read_pc (child_regcache
)),
5478 paddress (gdbarch
, parent_pc
));
5480 regcache_write_pc (child_regcache
, parent_pc
);
5484 context_switch (ecs
);
5486 /* Immediately detach breakpoints from the child before there's
5487 any chance of letting the user delete breakpoints from the
5488 breakpoint lists. If we don't do this early, it's easy to
5489 leave left over traps in the child, vis: "break foo; catch
5490 fork; c; <fork>; del; c; <child calls foo>". We only follow
5491 the fork on the last `continue', and by that time the
5492 breakpoint at "foo" is long gone from the breakpoint table.
5493 If we vforked, then we don't need to unpatch here, since both
5494 parent and child are sharing the same memory pages; we'll
5495 need to unpatch at follow/detach time instead to be certain
5496 that new breakpoints added between catchpoint hit time and
5497 vfork follow are detached. */
5498 if (ecs
->ws
.kind
!= TARGET_WAITKIND_VFORKED
)
5500 /* This won't actually modify the breakpoint list, but will
5501 physically remove the breakpoints from the child. */
5502 detach_breakpoints (ecs
->ws
.value
.related_pid
);
5505 delete_just_stopped_threads_single_step_breakpoints ();
5507 /* In case the event is caught by a catchpoint, remember that
5508 the event is to be followed at the next resume of the thread,
5509 and not immediately. */
5510 ecs
->event_thread
->pending_follow
= ecs
->ws
;
5512 ecs
->event_thread
->suspend
.stop_pc
5513 = regcache_read_pc (get_thread_regcache (ecs
->event_thread
));
5515 ecs
->event_thread
->control
.stop_bpstat
5516 = bpstat_stop_status (get_current_regcache ()->aspace (),
5517 ecs
->event_thread
->suspend
.stop_pc
,
5518 ecs
->event_thread
, &ecs
->ws
);
5520 if (handle_stop_requested (ecs
))
5523 /* If no catchpoint triggered for this, then keep going. Note
5524 that we're interested in knowing the bpstat actually causes a
5525 stop, not just if it may explain the signal. Software
5526 watchpoints, for example, always appear in the bpstat. */
5527 if (!bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
5530 = (follow_fork_mode_string
== follow_fork_mode_child
);
5532 ecs
->event_thread
->suspend
.stop_signal
= GDB_SIGNAL_0
;
5534 process_stratum_target
*targ
5535 = ecs
->event_thread
->inf
->process_target ();
5537 bool should_resume
= follow_fork ();
5539 /* Note that one of these may be an invalid pointer,
5540 depending on detach_fork. */
5541 thread_info
*parent
= ecs
->event_thread
;
5543 = find_thread_ptid (targ
, ecs
->ws
.value
.related_pid
);
5545 /* At this point, the parent is marked running, and the
5546 child is marked stopped. */
5548 /* If not resuming the parent, mark it stopped. */
5549 if (follow_child
&& !detach_fork
&& !non_stop
&& !sched_multi
)
5550 parent
->set_running (false);
5552 /* If resuming the child, mark it running. */
5553 if (follow_child
|| (!detach_fork
&& (non_stop
|| sched_multi
)))
5554 child
->set_running (true);
5556 /* In non-stop mode, also resume the other branch. */
5557 if (!detach_fork
&& (non_stop
5558 || (sched_multi
&& target_is_non_stop_p ())))
5561 switch_to_thread (parent
);
5563 switch_to_thread (child
);
5565 ecs
->event_thread
= inferior_thread ();
5566 ecs
->ptid
= inferior_ptid
;
5571 switch_to_thread (child
);
5573 switch_to_thread (parent
);
5575 ecs
->event_thread
= inferior_thread ();
5576 ecs
->ptid
= inferior_ptid
;
5584 process_event_stop_test (ecs
);
5587 case TARGET_WAITKIND_VFORK_DONE
:
5588 /* Done with the shared memory region. Re-insert breakpoints in
5589 the parent, and keep going. */
5591 context_switch (ecs
);
5593 current_inferior ()->waiting_for_vfork_done
= 0;
5594 current_inferior ()->pspace
->breakpoints_not_allowed
= 0;
5596 if (handle_stop_requested (ecs
))
5599 /* This also takes care of reinserting breakpoints in the
5600 previously locked inferior. */
5604 case TARGET_WAITKIND_EXECD
:
5606 /* Note we can't read registers yet (the stop_pc), because we
5607 don't yet know the inferior's post-exec architecture.
5608 'stop_pc' is explicitly read below instead. */
5609 switch_to_thread_no_regs (ecs
->event_thread
);
5611 /* Do whatever is necessary to the parent branch of the vfork. */
5612 handle_vfork_child_exec_or_exit (1);
5614 /* This causes the eventpoints and symbol table to be reset.
5615 Must do this now, before trying to determine whether to
5617 follow_exec (inferior_ptid
, ecs
->ws
.value
.execd_pathname
);
5619 /* In follow_exec we may have deleted the original thread and
5620 created a new one. Make sure that the event thread is the
5621 execd thread for that case (this is a nop otherwise). */
5622 ecs
->event_thread
= inferior_thread ();
5624 ecs
->event_thread
->suspend
.stop_pc
5625 = regcache_read_pc (get_thread_regcache (ecs
->event_thread
));
5627 ecs
->event_thread
->control
.stop_bpstat
5628 = bpstat_stop_status (get_current_regcache ()->aspace (),
5629 ecs
->event_thread
->suspend
.stop_pc
,
5630 ecs
->event_thread
, &ecs
->ws
);
5632 /* Note that this may be referenced from inside
5633 bpstat_stop_status above, through inferior_has_execd. */
5634 xfree (ecs
->ws
.value
.execd_pathname
);
5635 ecs
->ws
.value
.execd_pathname
= NULL
;
5637 if (handle_stop_requested (ecs
))
5640 /* If no catchpoint triggered for this, then keep going. */
5641 if (!bpstat_causes_stop (ecs
->event_thread
->control
.stop_bpstat
))
5643 ecs
->event_thread
->suspend
.stop_signal
= GDB_SIGNAL_0
;
5647 process_event_stop_test (ecs
);
5650 /* Be careful not to try to gather much state about a thread
5651 that's in a syscall. It's frequently a losing proposition. */
5652 case TARGET_WAITKIND_SYSCALL_ENTRY
:
5653 /* Getting the current syscall number. */
5654 if (handle_syscall_event (ecs
) == 0)
5655 process_event_stop_test (ecs
);
5658 /* Before examining the threads further, step this thread to
5659 get it entirely out of the syscall. (We get notice of the
5660 event when the thread is just on the verge of exiting a
5661 syscall. Stepping one instruction seems to get it back
5663 case TARGET_WAITKIND_SYSCALL_RETURN
:
5664 if (handle_syscall_event (ecs
) == 0)
5665 process_event_stop_test (ecs
);
5668 case TARGET_WAITKIND_STOPPED
:
5669 handle_signal_stop (ecs
);
5672 case TARGET_WAITKIND_NO_HISTORY
:
5673 /* Reverse execution: target ran out of history info. */
5675 /* Switch to the stopped thread. */
5676 context_switch (ecs
);
5678 fprintf_unfiltered (gdb_stdlog
, "infrun: stopped\n");
5680 delete_just_stopped_threads_single_step_breakpoints ();
5681 ecs
->event_thread
->suspend
.stop_pc
5682 = regcache_read_pc (get_thread_regcache (inferior_thread ()));
5684 if (handle_stop_requested (ecs
))
5687 gdb::observers::no_history
.notify ();
5693 /* Restart threads back to what they were trying to do back when we
5694 paused them for an in-line step-over. The EVENT_THREAD thread is
5698 restart_threads (struct thread_info
*event_thread
)
5700 /* In case the instruction just stepped spawned a new thread. */
5701 update_thread_list ();
5703 for (thread_info
*tp
: all_non_exited_threads ())
5705 switch_to_thread_no_regs (tp
);
5707 if (tp
== event_thread
)
5710 fprintf_unfiltered (gdb_stdlog
,
5711 "infrun: restart threads: "
5712 "[%s] is event thread\n",
5713 target_pid_to_str (tp
->ptid
).c_str ());
5717 if (!(tp
->state
== THREAD_RUNNING
|| tp
->control
.in_infcall
))
5720 fprintf_unfiltered (gdb_stdlog
,
5721 "infrun: restart threads: "
5722 "[%s] not meant to be running\n",
5723 target_pid_to_str (tp
->ptid
).c_str ());
5730 fprintf_unfiltered (gdb_stdlog
,
5731 "infrun: restart threads: [%s] resumed\n",
5732 target_pid_to_str (tp
->ptid
).c_str ());
5733 gdb_assert (tp
->executing
|| tp
->suspend
.waitstatus_pending_p
);
5737 if (thread_is_in_step_over_chain (tp
))
5740 fprintf_unfiltered (gdb_stdlog
,
5741 "infrun: restart threads: "
5742 "[%s] needs step-over\n",
5743 target_pid_to_str (tp
->ptid
).c_str ());
5744 gdb_assert (!tp
->resumed
);
5749 if (tp
->suspend
.waitstatus_pending_p
)
5752 fprintf_unfiltered (gdb_stdlog
,
5753 "infrun: restart threads: "
5754 "[%s] has pending status\n",
5755 target_pid_to_str (tp
->ptid
).c_str ());
5760 gdb_assert (!tp
->stop_requested
);
5762 /* If some thread needs to start a step-over at this point, it
5763 should still be in the step-over queue, and thus skipped
5765 if (thread_still_needs_step_over (tp
))
5767 internal_error (__FILE__
, __LINE__
,
5768 "thread [%s] needs a step-over, but not in "
5769 "step-over queue\n",
5770 target_pid_to_str (tp
->ptid
).c_str ());
5773 if (currently_stepping (tp
))
5776 fprintf_unfiltered (gdb_stdlog
,
5777 "infrun: restart threads: [%s] was stepping\n",
5778 target_pid_to_str (tp
->ptid
).c_str ());
5779 keep_going_stepped_thread (tp
);
5783 struct execution_control_state ecss
;
5784 struct execution_control_state
*ecs
= &ecss
;
5787 fprintf_unfiltered (gdb_stdlog
,
5788 "infrun: restart threads: [%s] continuing\n",
5789 target_pid_to_str (tp
->ptid
).c_str ());
5790 reset_ecs (ecs
, tp
);
5791 switch_to_thread (tp
);
5792 keep_going_pass_signal (ecs
);
5797 /* Callback for iterate_over_threads. Find a resumed thread that has
5798 a pending waitstatus. */
5801 resumed_thread_with_pending_status (struct thread_info
*tp
,
5805 && tp
->suspend
.waitstatus_pending_p
);
5808 /* Called when we get an event that may finish an in-line or
5809 out-of-line (displaced stepping) step-over started previously.
5810 Return true if the event is processed and we should go back to the
5811 event loop; false if the caller should continue processing the
5815 finish_step_over (struct execution_control_state
*ecs
)
5817 int had_step_over_info
;
5819 displaced_step_fixup (ecs
->event_thread
,
5820 ecs
->event_thread
->suspend
.stop_signal
);
5822 had_step_over_info
= step_over_info_valid_p ();
5824 if (had_step_over_info
)
5826 /* If we're stepping over a breakpoint with all threads locked,
5827 then only the thread that was stepped should be reporting
5829 gdb_assert (ecs
->event_thread
->control
.trap_expected
);
5831 clear_step_over_info ();
5834 if (!target_is_non_stop_p ())
5837 /* Start a new step-over in another thread if there's one that
5841 /* If we were stepping over a breakpoint before, and haven't started
5842 a new in-line step-over sequence, then restart all other threads
5843 (except the event thread). We can't do this in all-stop, as then
5844 e.g., we wouldn't be able to issue any other remote packet until
5845 these other threads stop. */
5846 if (had_step_over_info
&& !step_over_info_valid_p ())
5848 struct thread_info
*pending
;
5850 /* If we only have threads with pending statuses, the restart
5851 below won't restart any thread and so nothing re-inserts the
5852 breakpoint we just stepped over. But we need it inserted
5853 when we later process the pending events, otherwise if
5854 another thread has a pending event for this breakpoint too,
5855 we'd discard its event (because the breakpoint that
5856 originally caused the event was no longer inserted). */
5857 context_switch (ecs
);
5858 insert_breakpoints ();
5860 restart_threads (ecs
->event_thread
);
5862 /* If we have events pending, go through handle_inferior_event
5863 again, picking up a pending event at random. This avoids
5864 thread starvation. */
5866 /* But not if we just stepped over a watchpoint in order to let
5867 the instruction execute so we can evaluate its expression.
5868 The set of watchpoints that triggered is recorded in the
5869 breakpoint objects themselves (see bp->watchpoint_triggered).
5870 If we processed another event first, that other event could
5871 clobber this info. */
5872 if (ecs
->event_thread
->stepping_over_watchpoint
)
5875 pending
= iterate_over_threads (resumed_thread_with_pending_status
,
5877 if (pending
!= NULL
)
5879 struct thread_info
*tp
= ecs
->event_thread
;
5880 struct regcache
*regcache
;
5884 fprintf_unfiltered (gdb_stdlog
,
5885 "infrun: found resumed threads with "
5886 "pending events, saving status\n");
5889 gdb_assert (pending
!= tp
);
5891 /* Record the event thread's event for later. */
5892 save_waitstatus (tp
, &ecs
->ws
);
5893 /* This was cleared early, by handle_inferior_event. Set it
5894 so this pending event is considered by
5898 gdb_assert (!tp
->executing
);
5900 regcache
= get_thread_regcache (tp
);
5901 tp
->suspend
.stop_pc
= regcache_read_pc (regcache
);
5905 fprintf_unfiltered (gdb_stdlog
,
5906 "infrun: saved stop_pc=%s for %s "
5907 "(currently_stepping=%d)\n",
5908 paddress (target_gdbarch (),
5909 tp
->suspend
.stop_pc
),
5910 target_pid_to_str (tp
->ptid
).c_str (),
5911 currently_stepping (tp
));
5914 /* This in-line step-over finished; clear this so we won't
5915 start a new one. This is what handle_signal_stop would
5916 do, if we returned false. */
5917 tp
->stepping_over_breakpoint
= 0;
5919 /* Wake up the event loop again. */
5920 mark_async_event_handler (infrun_async_inferior_event_token
);
5922 prepare_to_wait (ecs
);
5930 /* Come here when the program has stopped with a signal. */
5933 handle_signal_stop (struct execution_control_state
*ecs
)
5935 struct frame_info
*frame
;
5936 struct gdbarch
*gdbarch
;
5937 int stopped_by_watchpoint
;
5938 enum stop_kind stop_soon
;
5941 gdb_assert (ecs
->ws
.kind
== TARGET_WAITKIND_STOPPED
);
5943 ecs
->event_thread
->suspend
.stop_signal
= ecs
->ws
.value
.sig
;
5945 /* Do we need to clean up the state of a thread that has
5946 completed a displaced single-step? (Doing so usually affects
5947 the PC, so do it here, before we set stop_pc.) */
5948 if (finish_step_over (ecs
))
5951 /* If we either finished a single-step or hit a breakpoint, but
5952 the user wanted this thread to be stopped, pretend we got a
5953 SIG0 (generic unsignaled stop). */
5954 if (ecs
->event_thread
->stop_requested
5955 && ecs
->event_thread
->suspend
.stop_signal
== GDB_SIGNAL_TRAP
)
5956 ecs
->event_thread
->suspend
.stop_signal
= GDB_SIGNAL_0
;
5958 ecs
->event_thread
->suspend
.stop_pc
5959 = regcache_read_pc (get_thread_regcache (ecs
->event_thread
));
5963 struct regcache
*regcache
= get_thread_regcache (ecs
->event_thread
);
5964 struct gdbarch
*reg_gdbarch
= regcache
->arch ();
5966 switch_to_thread (ecs
->event_thread
);
5968 fprintf_unfiltered (gdb_stdlog
, "infrun: stop_pc = %s\n",
5969 paddress (reg_gdbarch
,
5970 ecs
->event_thread
->suspend
.stop_pc
));
5971 if (target_stopped_by_watchpoint ())
5975 fprintf_unfiltered (gdb_stdlog
, "infrun: stopped by watchpoint\n");
5977 if (target_stopped_data_address (current_top_target (), &addr
))
5978 fprintf_unfiltered (gdb_stdlog
,
5979 "infrun: stopped data address = %s\n",
5980 paddress (reg_gdbarch
, addr
));
5982 fprintf_unfiltered (gdb_stdlog
,
5983 "infrun: (no data address available)\n");
5987 /* This is originated from start_remote(), start_inferior() and
5988 shared libraries hook functions. */
5989 stop_soon
= get_inferior_stop_soon (ecs
);
5990 if (stop_soon
== STOP_QUIETLY
|| stop_soon
== STOP_QUIETLY_REMOTE
)
5992 context_switch (ecs
);
5994 fprintf_unfiltered (gdb_stdlog
, "infrun: quietly stopped\n");
5995 stop_print_frame
= 1;
6000 /* This originates from attach_command(). We need to overwrite
6001 the stop_signal here, because some kernels don't ignore a
6002 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6003 See more comments in inferior.h. On the other hand, if we
6004 get a non-SIGSTOP, report it to the user - assume the backend
6005 will handle the SIGSTOP if it should show up later.
6007 Also consider that the attach is complete when we see a
6008 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6009 target extended-remote report it instead of a SIGSTOP
6010 (e.g. gdbserver). We already rely on SIGTRAP being our
6011 signal, so this is no exception.
6013 Also consider that the attach is complete when we see a
6014 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6015 the target to stop all threads of the inferior, in case the
6016 low level attach operation doesn't stop them implicitly. If
6017 they weren't stopped implicitly, then the stub will report a
6018 GDB_SIGNAL_0, meaning: stopped for no particular reason
6019 other than GDB's request. */
6020 if (stop_soon
== STOP_QUIETLY_NO_SIGSTOP
6021 && (ecs
->event_thread
->suspend
.stop_signal
== GDB_SIGNAL_STOP
6022 || ecs
->event_thread
->suspend
.stop_signal
== GDB_SIGNAL_TRAP
6023 || ecs
->event_thread
->suspend
.stop_signal
== GDB_SIGNAL_0
))
6025 stop_print_frame
= 1;
6027 ecs
->event_thread
->suspend
.stop_signal
= GDB_SIGNAL_0
;
6031 /* See if something interesting happened to the non-current thread. If
6032 so, then switch to that thread. */
6033 if (ecs
->ptid
!= inferior_ptid
)
6036 fprintf_unfiltered (gdb_stdlog
, "infrun: context switch\n");
6038 context_switch (ecs
);
6040 if (deprecated_context_hook
)
6041 deprecated_context_hook (ecs
->event_thread
->global_num
);
6044 /* At this point, get hold of the now-current thread's frame. */
6045 frame
= get_current_frame ();
6046 gdbarch
= get_frame_arch (frame
);
6048 /* Pull the single step breakpoints out of the target. */
6049 if (ecs
->event_thread
->suspend
.stop_signal
== GDB_SIGNAL_TRAP
)
6051 struct regcache
*regcache
;
6054 regcache
= get_thread_regcache (ecs
->event_thread
);
6055 const address_space
*aspace
= regcache
->aspace ();
6057 pc
= regcache_read_pc (regcache
);
6059 /* However, before doing so, if this single-step breakpoint was
6060 actually for another thread, set this thread up for moving
6062 if (!thread_has_single_step_breakpoint_here (ecs
->event_thread
,
6065 if (single_step_breakpoint_inserted_here_p (aspace
, pc
))
6069 fprintf_unfiltered (gdb_stdlog
,
6070 "infrun: [%s] hit another thread's "
6071 "single-step breakpoint\n",
6072 target_pid_to_str (ecs
->ptid
).c_str ());
6074 ecs
->hit_singlestep_breakpoint
= 1;
6081 fprintf_unfiltered (gdb_stdlog
,
6082 "infrun: [%s] hit its "
6083 "single-step breakpoint\n",
6084 target_pid_to_str (ecs
->ptid
).c_str ());
6088 delete_just_stopped_threads_single_step_breakpoints ();
6090 if (ecs
->event_thread
->suspend
.stop_signal
== GDB_SIGNAL_TRAP
6091 && ecs
->event_thread
->control
.trap_expected
6092 && ecs
->event_thread
->stepping_over_watchpoint
)
6093 stopped_by_watchpoint
= 0;
6095 stopped_by_watchpoint
= watchpoints_triggered (&ecs
->ws
);
6097 /* If necessary, step over this watchpoint. We'll be back to display
6099 if (stopped_by_watchpoint
6100 && (target_have_steppable_watchpoint
6101 || gdbarch_have_nonsteppable_watchpoint (gdbarch
)))
6103 /* At this point, we are stopped at an instruction which has
6104 attempted to write to a piece of memory under control of
6105 a watchpoint. The instruction hasn't actually executed
6106 yet. If we were to evaluate the watchpoint expression
6107 now, we would get the old value, and therefore no change
6108 would seem to have occurred.
6110 In order to make watchpoints work `right', we really need
6111 to complete the memory write, and then evaluate the
6112 watchpoint expression. We do this by single-stepping the
6115 It may not be necessary to disable the watchpoint to step over
6116 it. For example, the PA can (with some kernel cooperation)
6117 single step over a watchpoint without disabling the watchpoint.
6119 It is far more common to need to disable a watchpoint to step
6120 the inferior over it. If we have non-steppable watchpoints,
6121 we must disable the current watchpoint; it's simplest to
6122 disable all watchpoints.
6124 Any breakpoint at PC must also be stepped over -- if there's
6125 one, it will have already triggered before the watchpoint
6126 triggered, and we either already reported it to the user, or
6127 it didn't cause a stop and we called keep_going. In either
6128 case, if there was a breakpoint at PC, we must be trying to
6130 ecs
->event_thread
->stepping_over_watchpoint
= 1;
6135 ecs
->event_thread
->stepping_over_breakpoint
= 0;
6136 ecs
->event_thread
->stepping_over_watchpoint
= 0;
6137 bpstat_clear (&ecs
->event_thread
->control
.stop_bpstat
);
6138 ecs
->event_thread
->control
.stop_step
= 0;
6139 stop_print_frame
= 1;
6140 stopped_by_random_signal
= 0;
6141 bpstat stop_chain
= NULL
;
6143 /* Hide inlined functions starting here, unless we just performed stepi or
6144 nexti. After stepi and nexti, always show the innermost frame (not any
6145 inline function call sites). */
6146 if (ecs
->event_thread
->control
.step_range_end
!= 1)
6148 const address_space
*aspace
6149 = get_thread_regcache (ecs
->event_thread
)->aspace ();
6151 /* skip_inline_frames is expensive, so we avoid it if we can
6152 determine that the address is one where functions cannot have
6153 been inlined. This improves performance with inferiors that
6154 load a lot of shared libraries, because the solib event
6155 breakpoint is defined as the address of a function (i.e. not
6156 inline). Note that we have to check the previous PC as well
6157 as the current one to catch cases when we have just
6158 single-stepped off a breakpoint prior to reinstating it.
6159 Note that we're assuming that the code we single-step to is
6160 not inline, but that's not definitive: there's nothing
6161 preventing the event breakpoint function from containing
6162 inlined code, and the single-step ending up there. If the
6163 user had set a breakpoint on that inlined code, the missing
6164 skip_inline_frames call would break things. Fortunately
6165 that's an extremely unlikely scenario. */
6166 if (!pc_at_non_inline_function (aspace
,
6167 ecs
->event_thread
->suspend
.stop_pc
,
6169 && !(ecs
->event_thread
->suspend
.stop_signal
== GDB_SIGNAL_TRAP
6170 && ecs
->event_thread
->control
.trap_expected
6171 && pc_at_non_inline_function (aspace
,
6172 ecs
->event_thread
->prev_pc
,
6175 stop_chain
= build_bpstat_chain (aspace
,
6176 ecs
->event_thread
->suspend
.stop_pc
,
6178 skip_inline_frames (ecs
->event_thread
, stop_chain
);
6180 /* Re-fetch current thread's frame in case that invalidated
6182 frame
= get_current_frame ();
6183 gdbarch
= get_frame_arch (frame
);
6187 if (ecs
->event_thread
->suspend
.stop_signal
== GDB_SIGNAL_TRAP
6188 && ecs
->event_thread
->control
.trap_expected
6189 && gdbarch_single_step_through_delay_p (gdbarch
)
6190 && currently_stepping (ecs
->event_thread
))
6192 /* We're trying to step off a breakpoint. Turns out that we're
6193 also on an instruction that needs to be stepped multiple
6194 times before it's been fully executing. E.g., architectures
6195 with a delay slot. It needs to be stepped twice, once for
6196 the instruction and once for the delay slot. */
6197 int step_through_delay
6198 = gdbarch_single_step_through_delay (gdbarch
, frame
);
6200 if (debug_infrun
&& step_through_delay
)
6201 fprintf_unfiltered (gdb_stdlog
, "infrun: step through delay\n");
6202 if (ecs
->event_thread
->control
.step_range_end
== 0
6203 && step_through_delay
)
6205 /* The user issued a continue when stopped at a breakpoint.
6206 Set up for another trap and get out of here. */
6207 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6211 else if (step_through_delay
)
6213 /* The user issued a step when stopped at a breakpoint.
6214 Maybe we should stop, maybe we should not - the delay
6215 slot *might* correspond to a line of source. In any
6216 case, don't decide that here, just set
6217 ecs->stepping_over_breakpoint, making sure we
6218 single-step again before breakpoints are re-inserted. */
6219 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6223 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6224 handles this event. */
6225 ecs
->event_thread
->control
.stop_bpstat
6226 = bpstat_stop_status (get_current_regcache ()->aspace (),
6227 ecs
->event_thread
->suspend
.stop_pc
,
6228 ecs
->event_thread
, &ecs
->ws
, stop_chain
);
6230 /* Following in case break condition called a
6232 stop_print_frame
= 1;
6234 /* This is where we handle "moribund" watchpoints. Unlike
6235 software breakpoints traps, hardware watchpoint traps are
6236 always distinguishable from random traps. If no high-level
6237 watchpoint is associated with the reported stop data address
6238 anymore, then the bpstat does not explain the signal ---
6239 simply make sure to ignore it if `stopped_by_watchpoint' is
6243 && ecs
->event_thread
->suspend
.stop_signal
== GDB_SIGNAL_TRAP
6244 && !bpstat_explains_signal (ecs
->event_thread
->control
.stop_bpstat
,
6246 && stopped_by_watchpoint
)
6247 fprintf_unfiltered (gdb_stdlog
,
6248 "infrun: no user watchpoint explains "
6249 "watchpoint SIGTRAP, ignoring\n");
6251 /* NOTE: cagney/2003-03-29: These checks for a random signal
6252 at one stage in the past included checks for an inferior
6253 function call's call dummy's return breakpoint. The original
6254 comment, that went with the test, read:
6256 ``End of a stack dummy. Some systems (e.g. Sony news) give
6257 another signal besides SIGTRAP, so check here as well as
6260 If someone ever tries to get call dummys on a
6261 non-executable stack to work (where the target would stop
6262 with something like a SIGSEGV), then those tests might need
6263 to be re-instated. Given, however, that the tests were only
6264 enabled when momentary breakpoints were not being used, I
6265 suspect that it won't be the case.
6267 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6268 be necessary for call dummies on a non-executable stack on
6271 /* See if the breakpoints module can explain the signal. */
6273 = !bpstat_explains_signal (ecs
->event_thread
->control
.stop_bpstat
,
6274 ecs
->event_thread
->suspend
.stop_signal
);
6276 /* Maybe this was a trap for a software breakpoint that has since
6278 if (random_signal
&& target_stopped_by_sw_breakpoint ())
6280 if (gdbarch_program_breakpoint_here_p (gdbarch
,
6281 ecs
->event_thread
->suspend
.stop_pc
))
6283 struct regcache
*regcache
;
6286 /* Re-adjust PC to what the program would see if GDB was not
6288 regcache
= get_thread_regcache (ecs
->event_thread
);
6289 decr_pc
= gdbarch_decr_pc_after_break (gdbarch
);
6292 gdb::optional
<scoped_restore_tmpl
<int>>
6293 restore_operation_disable
;
6295 if (record_full_is_used ())
6296 restore_operation_disable
.emplace
6297 (record_full_gdb_operation_disable_set ());
6299 regcache_write_pc (regcache
,
6300 ecs
->event_thread
->suspend
.stop_pc
+ decr_pc
);
6305 /* A delayed software breakpoint event. Ignore the trap. */
6307 fprintf_unfiltered (gdb_stdlog
,
6308 "infrun: delayed software breakpoint "
6309 "trap, ignoring\n");
6314 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6315 has since been removed. */
6316 if (random_signal
&& target_stopped_by_hw_breakpoint ())
6318 /* A delayed hardware breakpoint event. Ignore the trap. */
6320 fprintf_unfiltered (gdb_stdlog
,
6321 "infrun: delayed hardware breakpoint/watchpoint "
6322 "trap, ignoring\n");
6326 /* If not, perhaps stepping/nexting can. */
6328 random_signal
= !(ecs
->event_thread
->suspend
.stop_signal
== GDB_SIGNAL_TRAP
6329 && currently_stepping (ecs
->event_thread
));
6331 /* Perhaps the thread hit a single-step breakpoint of _another_
6332 thread. Single-step breakpoints are transparent to the
6333 breakpoints module. */
6335 random_signal
= !ecs
->hit_singlestep_breakpoint
;
6337 /* No? Perhaps we got a moribund watchpoint. */
6339 random_signal
= !stopped_by_watchpoint
;
6341 /* Always stop if the user explicitly requested this thread to
6343 if (ecs
->event_thread
->stop_requested
)
6347 fprintf_unfiltered (gdb_stdlog
, "infrun: user-requested stop\n");
6350 /* For the program's own signals, act according to
6351 the signal handling tables. */
6355 /* Signal not for debugging purposes. */
6356 struct inferior
*inf
= find_inferior_ptid (ecs
->target
, ecs
->ptid
);
6357 enum gdb_signal stop_signal
= ecs
->event_thread
->suspend
.stop_signal
;
6360 fprintf_unfiltered (gdb_stdlog
, "infrun: random signal (%s)\n",
6361 gdb_signal_to_symbol_string (stop_signal
));
6363 stopped_by_random_signal
= 1;
6365 /* Always stop on signals if we're either just gaining control
6366 of the program, or the user explicitly requested this thread
6367 to remain stopped. */
6368 if (stop_soon
!= NO_STOP_QUIETLY
6369 || ecs
->event_thread
->stop_requested
6371 && signal_stop_state (ecs
->event_thread
->suspend
.stop_signal
)))
6377 /* Notify observers the signal has "handle print" set. Note we
6378 returned early above if stopping; normal_stop handles the
6379 printing in that case. */
6380 if (signal_print
[ecs
->event_thread
->suspend
.stop_signal
])
6382 /* The signal table tells us to print about this signal. */
6383 target_terminal::ours_for_output ();
6384 gdb::observers::signal_received
.notify (ecs
->event_thread
->suspend
.stop_signal
);
6385 target_terminal::inferior ();
6388 /* Clear the signal if it should not be passed. */
6389 if (signal_program
[ecs
->event_thread
->suspend
.stop_signal
] == 0)
6390 ecs
->event_thread
->suspend
.stop_signal
= GDB_SIGNAL_0
;
6392 if (ecs
->event_thread
->prev_pc
== ecs
->event_thread
->suspend
.stop_pc
6393 && ecs
->event_thread
->control
.trap_expected
6394 && ecs
->event_thread
->control
.step_resume_breakpoint
== NULL
)
6396 /* We were just starting a new sequence, attempting to
6397 single-step off of a breakpoint and expecting a SIGTRAP.
6398 Instead this signal arrives. This signal will take us out
6399 of the stepping range so GDB needs to remember to, when
6400 the signal handler returns, resume stepping off that
6402 /* To simplify things, "continue" is forced to use the same
6403 code paths as single-step - set a breakpoint at the
6404 signal return address and then, once hit, step off that
6407 fprintf_unfiltered (gdb_stdlog
,
6408 "infrun: signal arrived while stepping over "
6411 insert_hp_step_resume_breakpoint_at_frame (frame
);
6412 ecs
->event_thread
->step_after_step_resume_breakpoint
= 1;
6413 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6414 ecs
->event_thread
->control
.trap_expected
= 0;
6416 /* If we were nexting/stepping some other thread, switch to
6417 it, so that we don't continue it, losing control. */
6418 if (!switch_back_to_stepped_thread (ecs
))
6423 if (ecs
->event_thread
->suspend
.stop_signal
!= GDB_SIGNAL_0
6424 && (pc_in_thread_step_range (ecs
->event_thread
->suspend
.stop_pc
,
6426 || ecs
->event_thread
->control
.step_range_end
== 1)
6427 && frame_id_eq (get_stack_frame_id (frame
),
6428 ecs
->event_thread
->control
.step_stack_frame_id
)
6429 && ecs
->event_thread
->control
.step_resume_breakpoint
== NULL
)
6431 /* The inferior is about to take a signal that will take it
6432 out of the single step range. Set a breakpoint at the
6433 current PC (which is presumably where the signal handler
6434 will eventually return) and then allow the inferior to
6437 Note that this is only needed for a signal delivered
6438 while in the single-step range. Nested signals aren't a
6439 problem as they eventually all return. */
6441 fprintf_unfiltered (gdb_stdlog
,
6442 "infrun: signal may take us out of "
6443 "single-step range\n");
6445 clear_step_over_info ();
6446 insert_hp_step_resume_breakpoint_at_frame (frame
);
6447 ecs
->event_thread
->step_after_step_resume_breakpoint
= 1;
6448 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6449 ecs
->event_thread
->control
.trap_expected
= 0;
6454 /* Note: step_resume_breakpoint may be non-NULL. This occurs
6455 when either there's a nested signal, or when there's a
6456 pending signal enabled just as the signal handler returns
6457 (leaving the inferior at the step-resume-breakpoint without
6458 actually executing it). Either way continue until the
6459 breakpoint is really hit. */
6461 if (!switch_back_to_stepped_thread (ecs
))
6464 fprintf_unfiltered (gdb_stdlog
,
6465 "infrun: random signal, keep going\n");
6472 process_event_stop_test (ecs
);
6475 /* Come here when we've got some debug event / signal we can explain
6476 (IOW, not a random signal), and test whether it should cause a
6477 stop, or whether we should resume the inferior (transparently).
6478 E.g., could be a breakpoint whose condition evaluates false; we
6479 could be still stepping within the line; etc. */
6482 process_event_stop_test (struct execution_control_state
*ecs
)
6484 struct symtab_and_line stop_pc_sal
;
6485 struct frame_info
*frame
;
6486 struct gdbarch
*gdbarch
;
6487 CORE_ADDR jmp_buf_pc
;
6488 struct bpstat_what what
;
6490 /* Handle cases caused by hitting a breakpoint. */
6492 frame
= get_current_frame ();
6493 gdbarch
= get_frame_arch (frame
);
6495 what
= bpstat_what (ecs
->event_thread
->control
.stop_bpstat
);
6497 if (what
.call_dummy
)
6499 stop_stack_dummy
= what
.call_dummy
;
6502 /* A few breakpoint types have callbacks associated (e.g.,
6503 bp_jit_event). Run them now. */
6504 bpstat_run_callbacks (ecs
->event_thread
->control
.stop_bpstat
);
6506 /* If we hit an internal event that triggers symbol changes, the
6507 current frame will be invalidated within bpstat_what (e.g., if we
6508 hit an internal solib event). Re-fetch it. */
6509 frame
= get_current_frame ();
6510 gdbarch
= get_frame_arch (frame
);
6512 switch (what
.main_action
)
6514 case BPSTAT_WHAT_SET_LONGJMP_RESUME
:
6515 /* If we hit the breakpoint at longjmp while stepping, we
6516 install a momentary breakpoint at the target of the
6520 fprintf_unfiltered (gdb_stdlog
,
6521 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
6523 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6525 if (what
.is_longjmp
)
6527 struct value
*arg_value
;
6529 /* If we set the longjmp breakpoint via a SystemTap probe,
6530 then use it to extract the arguments. The destination PC
6531 is the third argument to the probe. */
6532 arg_value
= probe_safe_evaluate_at_pc (frame
, 2);
6535 jmp_buf_pc
= value_as_address (arg_value
);
6536 jmp_buf_pc
= gdbarch_addr_bits_remove (gdbarch
, jmp_buf_pc
);
6538 else if (!gdbarch_get_longjmp_target_p (gdbarch
)
6539 || !gdbarch_get_longjmp_target (gdbarch
,
6540 frame
, &jmp_buf_pc
))
6543 fprintf_unfiltered (gdb_stdlog
,
6544 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
6545 "(!gdbarch_get_longjmp_target)\n");
6550 /* Insert a breakpoint at resume address. */
6551 insert_longjmp_resume_breakpoint (gdbarch
, jmp_buf_pc
);
6554 check_exception_resume (ecs
, frame
);
6558 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME
:
6560 struct frame_info
*init_frame
;
6562 /* There are several cases to consider.
6564 1. The initiating frame no longer exists. In this case we
6565 must stop, because the exception or longjmp has gone too
6568 2. The initiating frame exists, and is the same as the
6569 current frame. We stop, because the exception or longjmp
6572 3. The initiating frame exists and is different from the
6573 current frame. This means the exception or longjmp has
6574 been caught beneath the initiating frame, so keep going.
6576 4. longjmp breakpoint has been placed just to protect
6577 against stale dummy frames and user is not interested in
6578 stopping around longjmps. */
6581 fprintf_unfiltered (gdb_stdlog
,
6582 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
6584 gdb_assert (ecs
->event_thread
->control
.exception_resume_breakpoint
6586 delete_exception_resume_breakpoint (ecs
->event_thread
);
6588 if (what
.is_longjmp
)
6590 check_longjmp_breakpoint_for_call_dummy (ecs
->event_thread
);
6592 if (!frame_id_p (ecs
->event_thread
->initiating_frame
))
6600 init_frame
= frame_find_by_id (ecs
->event_thread
->initiating_frame
);
6604 struct frame_id current_id
6605 = get_frame_id (get_current_frame ());
6606 if (frame_id_eq (current_id
,
6607 ecs
->event_thread
->initiating_frame
))
6609 /* Case 2. Fall through. */
6619 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6621 delete_step_resume_breakpoint (ecs
->event_thread
);
6623 end_stepping_range (ecs
);
6627 case BPSTAT_WHAT_SINGLE
:
6629 fprintf_unfiltered (gdb_stdlog
, "infrun: BPSTAT_WHAT_SINGLE\n");
6630 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6631 /* Still need to check other stuff, at least the case where we
6632 are stepping and step out of the right range. */
6635 case BPSTAT_WHAT_STEP_RESUME
:
6637 fprintf_unfiltered (gdb_stdlog
, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
6639 delete_step_resume_breakpoint (ecs
->event_thread
);
6640 if (ecs
->event_thread
->control
.proceed_to_finish
6641 && execution_direction
== EXEC_REVERSE
)
6643 struct thread_info
*tp
= ecs
->event_thread
;
6645 /* We are finishing a function in reverse, and just hit the
6646 step-resume breakpoint at the start address of the
6647 function, and we're almost there -- just need to back up
6648 by one more single-step, which should take us back to the
6650 tp
->control
.step_range_start
= tp
->control
.step_range_end
= 1;
6654 fill_in_stop_func (gdbarch
, ecs
);
6655 if (ecs
->event_thread
->suspend
.stop_pc
== ecs
->stop_func_start
6656 && execution_direction
== EXEC_REVERSE
)
6658 /* We are stepping over a function call in reverse, and just
6659 hit the step-resume breakpoint at the start address of
6660 the function. Go back to single-stepping, which should
6661 take us back to the function call. */
6662 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6668 case BPSTAT_WHAT_STOP_NOISY
:
6670 fprintf_unfiltered (gdb_stdlog
, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
6671 stop_print_frame
= 1;
6673 /* Assume the thread stopped for a breakpoint. We'll still check
6674 whether a/the breakpoint is there when the thread is next
6676 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6681 case BPSTAT_WHAT_STOP_SILENT
:
6683 fprintf_unfiltered (gdb_stdlog
, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
6684 stop_print_frame
= 0;
6686 /* Assume the thread stopped for a breakpoint. We'll still check
6687 whether a/the breakpoint is there when the thread is next
6689 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6693 case BPSTAT_WHAT_HP_STEP_RESUME
:
6695 fprintf_unfiltered (gdb_stdlog
, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
6697 delete_step_resume_breakpoint (ecs
->event_thread
);
6698 if (ecs
->event_thread
->step_after_step_resume_breakpoint
)
6700 /* Back when the step-resume breakpoint was inserted, we
6701 were trying to single-step off a breakpoint. Go back to
6703 ecs
->event_thread
->step_after_step_resume_breakpoint
= 0;
6704 ecs
->event_thread
->stepping_over_breakpoint
= 1;
6710 case BPSTAT_WHAT_KEEP_CHECKING
:
6714 /* If we stepped a permanent breakpoint and we had a high priority
6715 step-resume breakpoint for the address we stepped, but we didn't
6716 hit it, then we must have stepped into the signal handler. The
6717 step-resume was only necessary to catch the case of _not_
6718 stepping into the handler, so delete it, and fall through to
6719 checking whether the step finished. */
6720 if (ecs
->event_thread
->stepped_breakpoint
)
6722 struct breakpoint
*sr_bp
6723 = ecs
->event_thread
->control
.step_resume_breakpoint
;
6726 && sr_bp
->loc
->permanent
6727 && sr_bp
->type
== bp_hp_step_resume
6728 && sr_bp
->loc
->address
== ecs
->event_thread
->prev_pc
)
6731 fprintf_unfiltered (gdb_stdlog
,
6732 "infrun: stepped permanent breakpoint, stopped in "
6734 delete_step_resume_breakpoint (ecs
->event_thread
);
6735 ecs
->event_thread
->step_after_step_resume_breakpoint
= 0;
6739 /* We come here if we hit a breakpoint but should not stop for it.
6740 Possibly we also were stepping and should stop for that. So fall
6741 through and test for stepping. But, if not stepping, do not
6744 /* In all-stop mode, if we're currently stepping but have stopped in
6745 some other thread, we need to switch back to the stepped thread. */
6746 if (switch_back_to_stepped_thread (ecs
))
6749 if (ecs
->event_thread
->control
.step_resume_breakpoint
)
6752 fprintf_unfiltered (gdb_stdlog
,
6753 "infrun: step-resume breakpoint is inserted\n");
6755 /* Having a step-resume breakpoint overrides anything
6756 else having to do with stepping commands until
6757 that breakpoint is reached. */
6762 if (ecs
->event_thread
->control
.step_range_end
== 0)
6765 fprintf_unfiltered (gdb_stdlog
, "infrun: no stepping, continue\n");
6766 /* Likewise if we aren't even stepping. */
6771 /* Re-fetch current thread's frame in case the code above caused
6772 the frame cache to be re-initialized, making our FRAME variable
6773 a dangling pointer. */
6774 frame
= get_current_frame ();
6775 gdbarch
= get_frame_arch (frame
);
6776 fill_in_stop_func (gdbarch
, ecs
);
6778 /* If stepping through a line, keep going if still within it.
6780 Note that step_range_end is the address of the first instruction
6781 beyond the step range, and NOT the address of the last instruction
6784 Note also that during reverse execution, we may be stepping
6785 through a function epilogue and therefore must detect when
6786 the current-frame changes in the middle of a line. */
6788 if (pc_in_thread_step_range (ecs
->event_thread
->suspend
.stop_pc
,
6790 && (execution_direction
!= EXEC_REVERSE
6791 || frame_id_eq (get_frame_id (frame
),
6792 ecs
->event_thread
->control
.step_frame_id
)))
6796 (gdb_stdlog
, "infrun: stepping inside range [%s-%s]\n",
6797 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_start
),
6798 paddress (gdbarch
, ecs
->event_thread
->control
.step_range_end
));
6800 /* Tentatively re-enable range stepping; `resume' disables it if
6801 necessary (e.g., if we're stepping over a breakpoint or we
6802 have software watchpoints). */
6803 ecs
->event_thread
->control
.may_range_step
= 1;
6805 /* When stepping backward, stop at beginning of line range
6806 (unless it's the function entry point, in which case
6807 keep going back to the call point). */
6808 CORE_ADDR stop_pc
= ecs
->event_thread
->suspend
.stop_pc
;
6809 if (stop_pc
== ecs
->event_thread
->control
.step_range_start
6810 && stop_pc
!= ecs
->stop_func_start
6811 && execution_direction
== EXEC_REVERSE
)
6812 end_stepping_range (ecs
);
6819 /* We stepped out of the stepping range. */
6821 /* If we are stepping at the source level and entered the runtime
6822 loader dynamic symbol resolution code...
6824 EXEC_FORWARD: we keep on single stepping until we exit the run
6825 time loader code and reach the callee's address.
6827 EXEC_REVERSE: we've already executed the callee (backward), and
6828 the runtime loader code is handled just like any other
6829 undebuggable function call. Now we need only keep stepping
6830 backward through the trampoline code, and that's handled further
6831 down, so there is nothing for us to do here. */
6833 if (execution_direction
!= EXEC_REVERSE
6834 && ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
6835 && in_solib_dynsym_resolve_code (ecs
->event_thread
->suspend
.stop_pc
))
6837 CORE_ADDR pc_after_resolver
=
6838 gdbarch_skip_solib_resolver (gdbarch
,
6839 ecs
->event_thread
->suspend
.stop_pc
);
6842 fprintf_unfiltered (gdb_stdlog
,
6843 "infrun: stepped into dynsym resolve code\n");
6845 if (pc_after_resolver
)
6847 /* Set up a step-resume breakpoint at the address
6848 indicated by SKIP_SOLIB_RESOLVER. */
6849 symtab_and_line sr_sal
;
6850 sr_sal
.pc
= pc_after_resolver
;
6851 sr_sal
.pspace
= get_frame_program_space (frame
);
6853 insert_step_resume_breakpoint_at_sal (gdbarch
,
6854 sr_sal
, null_frame_id
);
6861 /* Step through an indirect branch thunk. */
6862 if (ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
6863 && gdbarch_in_indirect_branch_thunk (gdbarch
,
6864 ecs
->event_thread
->suspend
.stop_pc
))
6867 fprintf_unfiltered (gdb_stdlog
,
6868 "infrun: stepped into indirect branch thunk\n");
6873 if (ecs
->event_thread
->control
.step_range_end
!= 1
6874 && (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
6875 || ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
)
6876 && get_frame_type (frame
) == SIGTRAMP_FRAME
)
6879 fprintf_unfiltered (gdb_stdlog
,
6880 "infrun: stepped into signal trampoline\n");
6881 /* The inferior, while doing a "step" or "next", has ended up in
6882 a signal trampoline (either by a signal being delivered or by
6883 the signal handler returning). Just single-step until the
6884 inferior leaves the trampoline (either by calling the handler
6890 /* If we're in the return path from a shared library trampoline,
6891 we want to proceed through the trampoline when stepping. */
6892 /* macro/2012-04-25: This needs to come before the subroutine
6893 call check below as on some targets return trampolines look
6894 like subroutine calls (MIPS16 return thunks). */
6895 if (gdbarch_in_solib_return_trampoline (gdbarch
,
6896 ecs
->event_thread
->suspend
.stop_pc
,
6897 ecs
->stop_func_name
)
6898 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
)
6900 /* Determine where this trampoline returns. */
6901 CORE_ADDR stop_pc
= ecs
->event_thread
->suspend
.stop_pc
;
6902 CORE_ADDR real_stop_pc
6903 = gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
);
6906 fprintf_unfiltered (gdb_stdlog
,
6907 "infrun: stepped into solib return tramp\n");
6909 /* Only proceed through if we know where it's going. */
6912 /* And put the step-breakpoint there and go until there. */
6913 symtab_and_line sr_sal
;
6914 sr_sal
.pc
= real_stop_pc
;
6915 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
6916 sr_sal
.pspace
= get_frame_program_space (frame
);
6918 /* Do not specify what the fp should be when we stop since
6919 on some machines the prologue is where the new fp value
6921 insert_step_resume_breakpoint_at_sal (gdbarch
,
6922 sr_sal
, null_frame_id
);
6924 /* Restart without fiddling with the step ranges or
6931 /* Check for subroutine calls. The check for the current frame
6932 equalling the step ID is not necessary - the check of the
6933 previous frame's ID is sufficient - but it is a common case and
6934 cheaper than checking the previous frame's ID.
6936 NOTE: frame_id_eq will never report two invalid frame IDs as
6937 being equal, so to get into this block, both the current and
6938 previous frame must have valid frame IDs. */
6939 /* The outer_frame_id check is a heuristic to detect stepping
6940 through startup code. If we step over an instruction which
6941 sets the stack pointer from an invalid value to a valid value,
6942 we may detect that as a subroutine call from the mythical
6943 "outermost" function. This could be fixed by marking
6944 outermost frames as !stack_p,code_p,special_p. Then the
6945 initial outermost frame, before sp was valid, would
6946 have code_addr == &_start. See the comment in frame_id_eq
6948 if (!frame_id_eq (get_stack_frame_id (frame
),
6949 ecs
->event_thread
->control
.step_stack_frame_id
)
6950 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
6951 ecs
->event_thread
->control
.step_stack_frame_id
)
6952 && (!frame_id_eq (ecs
->event_thread
->control
.step_stack_frame_id
,
6954 || (ecs
->event_thread
->control
.step_start_function
6955 != find_pc_function (ecs
->event_thread
->suspend
.stop_pc
)))))
6957 CORE_ADDR stop_pc
= ecs
->event_thread
->suspend
.stop_pc
;
6958 CORE_ADDR real_stop_pc
;
6961 fprintf_unfiltered (gdb_stdlog
, "infrun: stepped into subroutine\n");
6963 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_NONE
)
6965 /* I presume that step_over_calls is only 0 when we're
6966 supposed to be stepping at the assembly language level
6967 ("stepi"). Just stop. */
6968 /* And this works the same backward as frontward. MVS */
6969 end_stepping_range (ecs
);
6973 /* Reverse stepping through solib trampolines. */
6975 if (execution_direction
== EXEC_REVERSE
6976 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
6977 && (gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
)
6978 || (ecs
->stop_func_start
== 0
6979 && in_solib_dynsym_resolve_code (stop_pc
))))
6981 /* Any solib trampoline code can be handled in reverse
6982 by simply continuing to single-step. We have already
6983 executed the solib function (backwards), and a few
6984 steps will take us back through the trampoline to the
6990 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
)
6992 /* We're doing a "next".
6994 Normal (forward) execution: set a breakpoint at the
6995 callee's return address (the address at which the caller
6998 Reverse (backward) execution. set the step-resume
6999 breakpoint at the start of the function that we just
7000 stepped into (backwards), and continue to there. When we
7001 get there, we'll need to single-step back to the caller. */
7003 if (execution_direction
== EXEC_REVERSE
)
7005 /* If we're already at the start of the function, we've either
7006 just stepped backward into a single instruction function,
7007 or stepped back out of a signal handler to the first instruction
7008 of the function. Just keep going, which will single-step back
7010 if (ecs
->stop_func_start
!= stop_pc
&& ecs
->stop_func_start
!= 0)
7012 /* Normal function call return (static or dynamic). */
7013 symtab_and_line sr_sal
;
7014 sr_sal
.pc
= ecs
->stop_func_start
;
7015 sr_sal
.pspace
= get_frame_program_space (frame
);
7016 insert_step_resume_breakpoint_at_sal (gdbarch
,
7017 sr_sal
, null_frame_id
);
7021 insert_step_resume_breakpoint_at_caller (frame
);
7027 /* If we are in a function call trampoline (a stub between the
7028 calling routine and the real function), locate the real
7029 function. That's what tells us (a) whether we want to step
7030 into it at all, and (b) what prologue we want to run to the
7031 end of, if we do step into it. */
7032 real_stop_pc
= skip_language_trampoline (frame
, stop_pc
);
7033 if (real_stop_pc
== 0)
7034 real_stop_pc
= gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
);
7035 if (real_stop_pc
!= 0)
7036 ecs
->stop_func_start
= real_stop_pc
;
7038 if (real_stop_pc
!= 0 && in_solib_dynsym_resolve_code (real_stop_pc
))
7040 symtab_and_line sr_sal
;
7041 sr_sal
.pc
= ecs
->stop_func_start
;
7042 sr_sal
.pspace
= get_frame_program_space (frame
);
7044 insert_step_resume_breakpoint_at_sal (gdbarch
,
7045 sr_sal
, null_frame_id
);
7050 /* If we have line number information for the function we are
7051 thinking of stepping into and the function isn't on the skip
7054 If there are several symtabs at that PC (e.g. with include
7055 files), just want to know whether *any* of them have line
7056 numbers. find_pc_line handles this. */
7058 struct symtab_and_line tmp_sal
;
7060 tmp_sal
= find_pc_line (ecs
->stop_func_start
, 0);
7061 if (tmp_sal
.line
!= 0
7062 && !function_name_is_marked_for_skip (ecs
->stop_func_name
,
7064 && !inline_frame_is_marked_for_skip (true, ecs
->event_thread
))
7066 if (execution_direction
== EXEC_REVERSE
)
7067 handle_step_into_function_backward (gdbarch
, ecs
);
7069 handle_step_into_function (gdbarch
, ecs
);
7074 /* If we have no line number and the step-stop-if-no-debug is
7075 set, we stop the step so that the user has a chance to switch
7076 in assembly mode. */
7077 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7078 && step_stop_if_no_debug
)
7080 end_stepping_range (ecs
);
7084 if (execution_direction
== EXEC_REVERSE
)
7086 /* If we're already at the start of the function, we've either just
7087 stepped backward into a single instruction function without line
7088 number info, or stepped back out of a signal handler to the first
7089 instruction of the function without line number info. Just keep
7090 going, which will single-step back to the caller. */
7091 if (ecs
->stop_func_start
!= stop_pc
)
7093 /* Set a breakpoint at callee's start address.
7094 From there we can step once and be back in the caller. */
7095 symtab_and_line sr_sal
;
7096 sr_sal
.pc
= ecs
->stop_func_start
;
7097 sr_sal
.pspace
= get_frame_program_space (frame
);
7098 insert_step_resume_breakpoint_at_sal (gdbarch
,
7099 sr_sal
, null_frame_id
);
7103 /* Set a breakpoint at callee's return address (the address
7104 at which the caller will resume). */
7105 insert_step_resume_breakpoint_at_caller (frame
);
7111 /* Reverse stepping through solib trampolines. */
7113 if (execution_direction
== EXEC_REVERSE
7114 && ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_NONE
)
7116 CORE_ADDR stop_pc
= ecs
->event_thread
->suspend
.stop_pc
;
7118 if (gdbarch_skip_trampoline_code (gdbarch
, frame
, stop_pc
)
7119 || (ecs
->stop_func_start
== 0
7120 && in_solib_dynsym_resolve_code (stop_pc
)))
7122 /* Any solib trampoline code can be handled in reverse
7123 by simply continuing to single-step. We have already
7124 executed the solib function (backwards), and a few
7125 steps will take us back through the trampoline to the
7130 else if (in_solib_dynsym_resolve_code (stop_pc
))
7132 /* Stepped backward into the solib dynsym resolver.
7133 Set a breakpoint at its start and continue, then
7134 one more step will take us out. */
7135 symtab_and_line sr_sal
;
7136 sr_sal
.pc
= ecs
->stop_func_start
;
7137 sr_sal
.pspace
= get_frame_program_space (frame
);
7138 insert_step_resume_breakpoint_at_sal (gdbarch
,
7139 sr_sal
, null_frame_id
);
7145 /* This always returns the sal for the inner-most frame when we are in a
7146 stack of inlined frames, even if GDB actually believes that it is in a
7147 more outer frame. This is checked for below by calls to
7148 inline_skipped_frames. */
7149 stop_pc_sal
= find_pc_line (ecs
->event_thread
->suspend
.stop_pc
, 0);
7151 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7152 the trampoline processing logic, however, there are some trampolines
7153 that have no names, so we should do trampoline handling first. */
7154 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_UNDEBUGGABLE
7155 && ecs
->stop_func_name
== NULL
7156 && stop_pc_sal
.line
== 0)
7159 fprintf_unfiltered (gdb_stdlog
,
7160 "infrun: stepped into undebuggable function\n");
7162 /* The inferior just stepped into, or returned to, an
7163 undebuggable function (where there is no debugging information
7164 and no line number corresponding to the address where the
7165 inferior stopped). Since we want to skip this kind of code,
7166 we keep going until the inferior returns from this
7167 function - unless the user has asked us not to (via
7168 set step-mode) or we no longer know how to get back
7169 to the call site. */
7170 if (step_stop_if_no_debug
7171 || !frame_id_p (frame_unwind_caller_id (frame
)))
7173 /* If we have no line number and the step-stop-if-no-debug
7174 is set, we stop the step so that the user has a chance to
7175 switch in assembly mode. */
7176 end_stepping_range (ecs
);
7181 /* Set a breakpoint at callee's return address (the address
7182 at which the caller will resume). */
7183 insert_step_resume_breakpoint_at_caller (frame
);
7189 if (ecs
->event_thread
->control
.step_range_end
== 1)
7191 /* It is stepi or nexti. We always want to stop stepping after
7194 fprintf_unfiltered (gdb_stdlog
, "infrun: stepi/nexti\n");
7195 end_stepping_range (ecs
);
7199 if (stop_pc_sal
.line
== 0)
7201 /* We have no line number information. That means to stop
7202 stepping (does this always happen right after one instruction,
7203 when we do "s" in a function with no line numbers,
7204 or can this happen as a result of a return or longjmp?). */
7206 fprintf_unfiltered (gdb_stdlog
, "infrun: no line number info\n");
7207 end_stepping_range (ecs
);
7211 /* Look for "calls" to inlined functions, part one. If the inline
7212 frame machinery detected some skipped call sites, we have entered
7213 a new inline function. */
7215 if (frame_id_eq (get_frame_id (get_current_frame ()),
7216 ecs
->event_thread
->control
.step_frame_id
)
7217 && inline_skipped_frames (ecs
->event_thread
))
7220 fprintf_unfiltered (gdb_stdlog
,
7221 "infrun: stepped into inlined function\n");
7223 symtab_and_line call_sal
= find_frame_sal (get_current_frame ());
7225 if (ecs
->event_thread
->control
.step_over_calls
!= STEP_OVER_ALL
)
7227 /* For "step", we're going to stop. But if the call site
7228 for this inlined function is on the same source line as
7229 we were previously stepping, go down into the function
7230 first. Otherwise stop at the call site. */
7232 if (call_sal
.line
== ecs
->event_thread
->current_line
7233 && call_sal
.symtab
== ecs
->event_thread
->current_symtab
)
7235 step_into_inline_frame (ecs
->event_thread
);
7236 if (inline_frame_is_marked_for_skip (false, ecs
->event_thread
))
7243 end_stepping_range (ecs
);
7248 /* For "next", we should stop at the call site if it is on a
7249 different source line. Otherwise continue through the
7250 inlined function. */
7251 if (call_sal
.line
== ecs
->event_thread
->current_line
7252 && call_sal
.symtab
== ecs
->event_thread
->current_symtab
)
7255 end_stepping_range (ecs
);
7260 /* Look for "calls" to inlined functions, part two. If we are still
7261 in the same real function we were stepping through, but we have
7262 to go further up to find the exact frame ID, we are stepping
7263 through a more inlined call beyond its call site. */
7265 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7266 && !frame_id_eq (get_frame_id (get_current_frame ()),
7267 ecs
->event_thread
->control
.step_frame_id
)
7268 && stepped_in_from (get_current_frame (),
7269 ecs
->event_thread
->control
.step_frame_id
))
7272 fprintf_unfiltered (gdb_stdlog
,
7273 "infrun: stepping through inlined function\n");
7275 if (ecs
->event_thread
->control
.step_over_calls
== STEP_OVER_ALL
7276 || inline_frame_is_marked_for_skip (false, ecs
->event_thread
))
7279 end_stepping_range (ecs
);
7283 bool refresh_step_info
= true;
7284 if ((ecs
->event_thread
->suspend
.stop_pc
== stop_pc_sal
.pc
)
7285 && (ecs
->event_thread
->current_line
!= stop_pc_sal
.line
7286 || ecs
->event_thread
->current_symtab
!= stop_pc_sal
.symtab
))
7288 if (stop_pc_sal
.is_stmt
)
7290 /* We are at the start of a different line. So stop. Note that
7291 we don't stop if we step into the middle of a different line.
7292 That is said to make things like for (;;) statements work
7295 fprintf_unfiltered (gdb_stdlog
,
7296 "infrun: stepped to a different line\n");
7297 end_stepping_range (ecs
);
7300 else if (frame_id_eq (get_frame_id (get_current_frame ()),
7301 ecs
->event_thread
->control
.step_frame_id
))
7303 /* We are at the start of a different line, however, this line is
7304 not marked as a statement, and we have not changed frame. We
7305 ignore this line table entry, and continue stepping forward,
7306 looking for a better place to stop. */
7307 refresh_step_info
= false;
7309 fprintf_unfiltered (gdb_stdlog
,
7310 "infrun: stepped to a different line, but "
7311 "it's not the start of a statement\n");
7315 /* We aren't done stepping.
7317 Optimize by setting the stepping range to the line.
7318 (We might not be in the original line, but if we entered a
7319 new line in mid-statement, we continue stepping. This makes
7320 things like for(;;) statements work better.)
7322 If we entered a SAL that indicates a non-statement line table entry,
7323 then we update the stepping range, but we don't update the step info,
7324 which includes things like the line number we are stepping away from.
7325 This means we will stop when we find a line table entry that is marked
7326 as is-statement, even if it matches the non-statement one we just
7329 ecs
->event_thread
->control
.step_range_start
= stop_pc_sal
.pc
;
7330 ecs
->event_thread
->control
.step_range_end
= stop_pc_sal
.end
;
7331 ecs
->event_thread
->control
.may_range_step
= 1;
7332 if (refresh_step_info
)
7333 set_step_info (ecs
->event_thread
, frame
, stop_pc_sal
);
7336 fprintf_unfiltered (gdb_stdlog
, "infrun: keep going\n");
7340 /* In all-stop mode, if we're currently stepping but have stopped in
7341 some other thread, we may need to switch back to the stepped
7342 thread. Returns true we set the inferior running, false if we left
7343 it stopped (and the event needs further processing). */
7346 switch_back_to_stepped_thread (struct execution_control_state
*ecs
)
7348 if (!target_is_non_stop_p ())
7350 struct thread_info
*stepping_thread
;
7352 /* If any thread is blocked on some internal breakpoint, and we
7353 simply need to step over that breakpoint to get it going
7354 again, do that first. */
7356 /* However, if we see an event for the stepping thread, then we
7357 know all other threads have been moved past their breakpoints
7358 already. Let the caller check whether the step is finished,
7359 etc., before deciding to move it past a breakpoint. */
7360 if (ecs
->event_thread
->control
.step_range_end
!= 0)
7363 /* Check if the current thread is blocked on an incomplete
7364 step-over, interrupted by a random signal. */
7365 if (ecs
->event_thread
->control
.trap_expected
7366 && ecs
->event_thread
->suspend
.stop_signal
!= GDB_SIGNAL_TRAP
)
7370 fprintf_unfiltered (gdb_stdlog
,
7371 "infrun: need to finish step-over of [%s]\n",
7372 target_pid_to_str (ecs
->event_thread
->ptid
).c_str ());
7378 /* Check if the current thread is blocked by a single-step
7379 breakpoint of another thread. */
7380 if (ecs
->hit_singlestep_breakpoint
)
7384 fprintf_unfiltered (gdb_stdlog
,
7385 "infrun: need to step [%s] over single-step "
7387 target_pid_to_str (ecs
->ptid
).c_str ());
7393 /* If this thread needs yet another step-over (e.g., stepping
7394 through a delay slot), do it first before moving on to
7396 if (thread_still_needs_step_over (ecs
->event_thread
))
7400 fprintf_unfiltered (gdb_stdlog
,
7401 "infrun: thread [%s] still needs step-over\n",
7402 target_pid_to_str (ecs
->event_thread
->ptid
).c_str ());
7408 /* If scheduler locking applies even if not stepping, there's no
7409 need to walk over threads. Above we've checked whether the
7410 current thread is stepping. If some other thread not the
7411 event thread is stepping, then it must be that scheduler
7412 locking is not in effect. */
7413 if (schedlock_applies (ecs
->event_thread
))
7416 /* Otherwise, we no longer expect a trap in the current thread.
7417 Clear the trap_expected flag before switching back -- this is
7418 what keep_going does as well, if we call it. */
7419 ecs
->event_thread
->control
.trap_expected
= 0;
7421 /* Likewise, clear the signal if it should not be passed. */
7422 if (!signal_program
[ecs
->event_thread
->suspend
.stop_signal
])
7423 ecs
->event_thread
->suspend
.stop_signal
= GDB_SIGNAL_0
;
7425 /* Do all pending step-overs before actually proceeding with
7427 if (start_step_over ())
7429 prepare_to_wait (ecs
);
7433 /* Look for the stepping/nexting thread. */
7434 stepping_thread
= NULL
;
7436 for (thread_info
*tp
: all_non_exited_threads ())
7438 switch_to_thread_no_regs (tp
);
7440 /* Ignore threads of processes the caller is not
7443 && (tp
->inf
->process_target () != ecs
->target
7444 || tp
->inf
->pid
!= ecs
->ptid
.pid ()))
7447 /* When stepping over a breakpoint, we lock all threads
7448 except the one that needs to move past the breakpoint.
7449 If a non-event thread has this set, the "incomplete
7450 step-over" check above should have caught it earlier. */
7451 if (tp
->control
.trap_expected
)
7453 internal_error (__FILE__
, __LINE__
,
7454 "[%s] has inconsistent state: "
7455 "trap_expected=%d\n",
7456 target_pid_to_str (tp
->ptid
).c_str (),
7457 tp
->control
.trap_expected
);
7460 /* Did we find the stepping thread? */
7461 if (tp
->control
.step_range_end
)
7463 /* Yep. There should only one though. */
7464 gdb_assert (stepping_thread
== NULL
);
7466 /* The event thread is handled at the top, before we
7468 gdb_assert (tp
!= ecs
->event_thread
);
7470 /* If some thread other than the event thread is
7471 stepping, then scheduler locking can't be in effect,
7472 otherwise we wouldn't have resumed the current event
7473 thread in the first place. */
7474 gdb_assert (!schedlock_applies (tp
));
7476 stepping_thread
= tp
;
7480 if (stepping_thread
!= NULL
)
7483 fprintf_unfiltered (gdb_stdlog
,
7484 "infrun: switching back to stepped thread\n");
7486 if (keep_going_stepped_thread (stepping_thread
))
7488 prepare_to_wait (ecs
);
7493 switch_to_thread (ecs
->event_thread
);
7499 /* Set a previously stepped thread back to stepping. Returns true on
7500 success, false if the resume is not possible (e.g., the thread
7504 keep_going_stepped_thread (struct thread_info
*tp
)
7506 struct frame_info
*frame
;
7507 struct execution_control_state ecss
;
7508 struct execution_control_state
*ecs
= &ecss
;
7510 /* If the stepping thread exited, then don't try to switch back and
7511 resume it, which could fail in several different ways depending
7512 on the target. Instead, just keep going.
7514 We can find a stepping dead thread in the thread list in two
7517 - The target supports thread exit events, and when the target
7518 tries to delete the thread from the thread list, inferior_ptid
7519 pointed at the exiting thread. In such case, calling
7520 delete_thread does not really remove the thread from the list;
7521 instead, the thread is left listed, with 'exited' state.
7523 - The target's debug interface does not support thread exit
7524 events, and so we have no idea whatsoever if the previously
7525 stepping thread is still alive. For that reason, we need to
7526 synchronously query the target now. */
7528 if (tp
->state
== THREAD_EXITED
|| !target_thread_alive (tp
->ptid
))
7531 fprintf_unfiltered (gdb_stdlog
,
7532 "infrun: not resuming previously "
7533 "stepped thread, it has vanished\n");
7540 fprintf_unfiltered (gdb_stdlog
,
7541 "infrun: resuming previously stepped thread\n");
7543 reset_ecs (ecs
, tp
);
7544 switch_to_thread (tp
);
7546 tp
->suspend
.stop_pc
= regcache_read_pc (get_thread_regcache (tp
));
7547 frame
= get_current_frame ();
7549 /* If the PC of the thread we were trying to single-step has
7550 changed, then that thread has trapped or been signaled, but the
7551 event has not been reported to GDB yet. Re-poll the target
7552 looking for this particular thread's event (i.e. temporarily
7553 enable schedlock) by:
7555 - setting a break at the current PC
7556 - resuming that particular thread, only (by setting trap
7559 This prevents us continuously moving the single-step breakpoint
7560 forward, one instruction at a time, overstepping. */
7562 if (tp
->suspend
.stop_pc
!= tp
->prev_pc
)
7567 fprintf_unfiltered (gdb_stdlog
,
7568 "infrun: expected thread advanced also (%s -> %s)\n",
7569 paddress (target_gdbarch (), tp
->prev_pc
),
7570 paddress (target_gdbarch (), tp
->suspend
.stop_pc
));
7572 /* Clear the info of the previous step-over, as it's no longer
7573 valid (if the thread was trying to step over a breakpoint, it
7574 has already succeeded). It's what keep_going would do too,
7575 if we called it. Do this before trying to insert the sss
7576 breakpoint, otherwise if we were previously trying to step
7577 over this exact address in another thread, the breakpoint is
7579 clear_step_over_info ();
7580 tp
->control
.trap_expected
= 0;
7582 insert_single_step_breakpoint (get_frame_arch (frame
),
7583 get_frame_address_space (frame
),
7584 tp
->suspend
.stop_pc
);
7587 resume_ptid
= internal_resume_ptid (tp
->control
.stepping_command
);
7588 do_target_resume (resume_ptid
, 0, GDB_SIGNAL_0
);
7593 fprintf_unfiltered (gdb_stdlog
,
7594 "infrun: expected thread still hasn't advanced\n");
7596 keep_going_pass_signal (ecs
);
7601 /* Is thread TP in the middle of (software or hardware)
7602 single-stepping? (Note the result of this function must never be
7603 passed directly as target_resume's STEP parameter.) */
7606 currently_stepping (struct thread_info
*tp
)
7608 return ((tp
->control
.step_range_end
7609 && tp
->control
.step_resume_breakpoint
== NULL
)
7610 || tp
->control
.trap_expected
7611 || tp
->stepped_breakpoint
7612 || bpstat_should_step ());
7615 /* Inferior has stepped into a subroutine call with source code that
7616 we should not step over. Do step to the first line of code in
7620 handle_step_into_function (struct gdbarch
*gdbarch
,
7621 struct execution_control_state
*ecs
)
7623 fill_in_stop_func (gdbarch
, ecs
);
7625 compunit_symtab
*cust
7626 = find_pc_compunit_symtab (ecs
->event_thread
->suspend
.stop_pc
);
7627 if (cust
!= NULL
&& compunit_language (cust
) != language_asm
)
7628 ecs
->stop_func_start
7629 = gdbarch_skip_prologue_noexcept (gdbarch
, ecs
->stop_func_start
);
7631 symtab_and_line stop_func_sal
= find_pc_line (ecs
->stop_func_start
, 0);
7632 /* Use the step_resume_break to step until the end of the prologue,
7633 even if that involves jumps (as it seems to on the vax under
7635 /* If the prologue ends in the middle of a source line, continue to
7636 the end of that source line (if it is still within the function).
7637 Otherwise, just go to end of prologue. */
7638 if (stop_func_sal
.end
7639 && stop_func_sal
.pc
!= ecs
->stop_func_start
7640 && stop_func_sal
.end
< ecs
->stop_func_end
)
7641 ecs
->stop_func_start
= stop_func_sal
.end
;
7643 /* Architectures which require breakpoint adjustment might not be able
7644 to place a breakpoint at the computed address. If so, the test
7645 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7646 ecs->stop_func_start to an address at which a breakpoint may be
7647 legitimately placed.
7649 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7650 made, GDB will enter an infinite loop when stepping through
7651 optimized code consisting of VLIW instructions which contain
7652 subinstructions corresponding to different source lines. On
7653 FR-V, it's not permitted to place a breakpoint on any but the
7654 first subinstruction of a VLIW instruction. When a breakpoint is
7655 set, GDB will adjust the breakpoint address to the beginning of
7656 the VLIW instruction. Thus, we need to make the corresponding
7657 adjustment here when computing the stop address. */
7659 if (gdbarch_adjust_breakpoint_address_p (gdbarch
))
7661 ecs
->stop_func_start
7662 = gdbarch_adjust_breakpoint_address (gdbarch
,
7663 ecs
->stop_func_start
);
7666 if (ecs
->stop_func_start
== ecs
->event_thread
->suspend
.stop_pc
)
7668 /* We are already there: stop now. */
7669 end_stepping_range (ecs
);
7674 /* Put the step-breakpoint there and go until there. */
7675 symtab_and_line sr_sal
;
7676 sr_sal
.pc
= ecs
->stop_func_start
;
7677 sr_sal
.section
= find_pc_overlay (ecs
->stop_func_start
);
7678 sr_sal
.pspace
= get_frame_program_space (get_current_frame ());
7680 /* Do not specify what the fp should be when we stop since on
7681 some machines the prologue is where the new fp value is
7683 insert_step_resume_breakpoint_at_sal (gdbarch
, sr_sal
, null_frame_id
);
7685 /* And make sure stepping stops right away then. */
7686 ecs
->event_thread
->control
.step_range_end
7687 = ecs
->event_thread
->control
.step_range_start
;
7692 /* Inferior has stepped backward into a subroutine call with source
7693 code that we should not step over. Do step to the beginning of the
7694 last line of code in it. */
7697 handle_step_into_function_backward (struct gdbarch
*gdbarch
,
7698 struct execution_control_state
*ecs
)
7700 struct compunit_symtab
*cust
;
7701 struct symtab_and_line stop_func_sal
;
7703 fill_in_stop_func (gdbarch
, ecs
);
7705 cust
= find_pc_compunit_symtab (ecs
->event_thread
->suspend
.stop_pc
);
7706 if (cust
!= NULL
&& compunit_language (cust
) != language_asm
)
7707 ecs
->stop_func_start
7708 = gdbarch_skip_prologue_noexcept (gdbarch
, ecs
->stop_func_start
);
7710 stop_func_sal
= find_pc_line (ecs
->event_thread
->suspend
.stop_pc
, 0);
7712 /* OK, we're just going to keep stepping here. */
7713 if (stop_func_sal
.pc
== ecs
->event_thread
->suspend
.stop_pc
)
7715 /* We're there already. Just stop stepping now. */
7716 end_stepping_range (ecs
);
7720 /* Else just reset the step range and keep going.
7721 No step-resume breakpoint, they don't work for
7722 epilogues, which can have multiple entry paths. */
7723 ecs
->event_thread
->control
.step_range_start
= stop_func_sal
.pc
;
7724 ecs
->event_thread
->control
.step_range_end
= stop_func_sal
.end
;
7730 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
7731 This is used to both functions and to skip over code. */
7734 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch
*gdbarch
,
7735 struct symtab_and_line sr_sal
,
7736 struct frame_id sr_id
,
7737 enum bptype sr_type
)
7739 /* There should never be more than one step-resume or longjmp-resume
7740 breakpoint per thread, so we should never be setting a new
7741 step_resume_breakpoint when one is already active. */
7742 gdb_assert (inferior_thread ()->control
.step_resume_breakpoint
== NULL
);
7743 gdb_assert (sr_type
== bp_step_resume
|| sr_type
== bp_hp_step_resume
);
7746 fprintf_unfiltered (gdb_stdlog
,
7747 "infrun: inserting step-resume breakpoint at %s\n",
7748 paddress (gdbarch
, sr_sal
.pc
));
7750 inferior_thread ()->control
.step_resume_breakpoint
7751 = set_momentary_breakpoint (gdbarch
, sr_sal
, sr_id
, sr_type
).release ();
7755 insert_step_resume_breakpoint_at_sal (struct gdbarch
*gdbarch
,
7756 struct symtab_and_line sr_sal
,
7757 struct frame_id sr_id
)
7759 insert_step_resume_breakpoint_at_sal_1 (gdbarch
,
7764 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7765 This is used to skip a potential signal handler.
7767 This is called with the interrupted function's frame. The signal
7768 handler, when it returns, will resume the interrupted function at
7772 insert_hp_step_resume_breakpoint_at_frame (struct frame_info
*return_frame
)
7774 gdb_assert (return_frame
!= NULL
);
7776 struct gdbarch
*gdbarch
= get_frame_arch (return_frame
);
7778 symtab_and_line sr_sal
;
7779 sr_sal
.pc
= gdbarch_addr_bits_remove (gdbarch
, get_frame_pc (return_frame
));
7780 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
7781 sr_sal
.pspace
= get_frame_program_space (return_frame
);
7783 insert_step_resume_breakpoint_at_sal_1 (gdbarch
, sr_sal
,
7784 get_stack_frame_id (return_frame
),
7788 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
7789 is used to skip a function after stepping into it (for "next" or if
7790 the called function has no debugging information).
7792 The current function has almost always been reached by single
7793 stepping a call or return instruction. NEXT_FRAME belongs to the
7794 current function, and the breakpoint will be set at the caller's
7797 This is a separate function rather than reusing
7798 insert_hp_step_resume_breakpoint_at_frame in order to avoid
7799 get_prev_frame, which may stop prematurely (see the implementation
7800 of frame_unwind_caller_id for an example). */
7803 insert_step_resume_breakpoint_at_caller (struct frame_info
*next_frame
)
7805 /* We shouldn't have gotten here if we don't know where the call site
7807 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame
)));
7809 struct gdbarch
*gdbarch
= frame_unwind_caller_arch (next_frame
);
7811 symtab_and_line sr_sal
;
7812 sr_sal
.pc
= gdbarch_addr_bits_remove (gdbarch
,
7813 frame_unwind_caller_pc (next_frame
));
7814 sr_sal
.section
= find_pc_overlay (sr_sal
.pc
);
7815 sr_sal
.pspace
= frame_unwind_program_space (next_frame
);
7817 insert_step_resume_breakpoint_at_sal (gdbarch
, sr_sal
,
7818 frame_unwind_caller_id (next_frame
));
7821 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7822 new breakpoint at the target of a jmp_buf. The handling of
7823 longjmp-resume uses the same mechanisms used for handling
7824 "step-resume" breakpoints. */
7827 insert_longjmp_resume_breakpoint (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
7829 /* There should never be more than one longjmp-resume breakpoint per
7830 thread, so we should never be setting a new
7831 longjmp_resume_breakpoint when one is already active. */
7832 gdb_assert (inferior_thread ()->control
.exception_resume_breakpoint
== NULL
);
7835 fprintf_unfiltered (gdb_stdlog
,
7836 "infrun: inserting longjmp-resume breakpoint at %s\n",
7837 paddress (gdbarch
, pc
));
7839 inferior_thread ()->control
.exception_resume_breakpoint
=
7840 set_momentary_breakpoint_at_pc (gdbarch
, pc
, bp_longjmp_resume
).release ();
7843 /* Insert an exception resume breakpoint. TP is the thread throwing
7844 the exception. The block B is the block of the unwinder debug hook
7845 function. FRAME is the frame corresponding to the call to this
7846 function. SYM is the symbol of the function argument holding the
7847 target PC of the exception. */
7850 insert_exception_resume_breakpoint (struct thread_info
*tp
,
7851 const struct block
*b
,
7852 struct frame_info
*frame
,
7857 struct block_symbol vsym
;
7858 struct value
*value
;
7860 struct breakpoint
*bp
;
7862 vsym
= lookup_symbol_search_name (sym
->search_name (),
7864 value
= read_var_value (vsym
.symbol
, vsym
.block
, frame
);
7865 /* If the value was optimized out, revert to the old behavior. */
7866 if (! value_optimized_out (value
))
7868 handler
= value_as_address (value
);
7871 fprintf_unfiltered (gdb_stdlog
,
7872 "infrun: exception resume at %lx\n",
7873 (unsigned long) handler
);
7875 bp
= set_momentary_breakpoint_at_pc (get_frame_arch (frame
),
7877 bp_exception_resume
).release ();
7879 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7882 bp
->thread
= tp
->global_num
;
7883 inferior_thread ()->control
.exception_resume_breakpoint
= bp
;
7886 catch (const gdb_exception_error
&e
)
7888 /* We want to ignore errors here. */
7892 /* A helper for check_exception_resume that sets an
7893 exception-breakpoint based on a SystemTap probe. */
7896 insert_exception_resume_from_probe (struct thread_info
*tp
,
7897 const struct bound_probe
*probe
,
7898 struct frame_info
*frame
)
7900 struct value
*arg_value
;
7902 struct breakpoint
*bp
;
7904 arg_value
= probe_safe_evaluate_at_pc (frame
, 1);
7908 handler
= value_as_address (arg_value
);
7911 fprintf_unfiltered (gdb_stdlog
,
7912 "infrun: exception resume at %s\n",
7913 paddress (probe
->objfile
->arch (),
7916 bp
= set_momentary_breakpoint_at_pc (get_frame_arch (frame
),
7917 handler
, bp_exception_resume
).release ();
7918 bp
->thread
= tp
->global_num
;
7919 inferior_thread ()->control
.exception_resume_breakpoint
= bp
;
7922 /* This is called when an exception has been intercepted. Check to
7923 see whether the exception's destination is of interest, and if so,
7924 set an exception resume breakpoint there. */
7927 check_exception_resume (struct execution_control_state
*ecs
,
7928 struct frame_info
*frame
)
7930 struct bound_probe probe
;
7931 struct symbol
*func
;
7933 /* First see if this exception unwinding breakpoint was set via a
7934 SystemTap probe point. If so, the probe has two arguments: the
7935 CFA and the HANDLER. We ignore the CFA, extract the handler, and
7936 set a breakpoint there. */
7937 probe
= find_probe_by_pc (get_frame_pc (frame
));
7940 insert_exception_resume_from_probe (ecs
->event_thread
, &probe
, frame
);
7944 func
= get_frame_function (frame
);
7950 const struct block
*b
;
7951 struct block_iterator iter
;
7955 /* The exception breakpoint is a thread-specific breakpoint on
7956 the unwinder's debug hook, declared as:
7958 void _Unwind_DebugHook (void *cfa, void *handler);
7960 The CFA argument indicates the frame to which control is
7961 about to be transferred. HANDLER is the destination PC.
7963 We ignore the CFA and set a temporary breakpoint at HANDLER.
7964 This is not extremely efficient but it avoids issues in gdb
7965 with computing the DWARF CFA, and it also works even in weird
7966 cases such as throwing an exception from inside a signal
7969 b
= SYMBOL_BLOCK_VALUE (func
);
7970 ALL_BLOCK_SYMBOLS (b
, iter
, sym
)
7972 if (!SYMBOL_IS_ARGUMENT (sym
))
7979 insert_exception_resume_breakpoint (ecs
->event_thread
,
7985 catch (const gdb_exception_error
&e
)
7991 stop_waiting (struct execution_control_state
*ecs
)
7994 fprintf_unfiltered (gdb_stdlog
, "infrun: stop_waiting\n");
7996 /* Let callers know we don't want to wait for the inferior anymore. */
7997 ecs
->wait_some_more
= 0;
7999 /* If all-stop, but there exists a non-stop target, stop all
8000 threads now that we're presenting the stop to the user. */
8001 if (!non_stop
&& exists_non_stop_target ())
8002 stop_all_threads ();
8005 /* Like keep_going, but passes the signal to the inferior, even if the
8006 signal is set to nopass. */
8009 keep_going_pass_signal (struct execution_control_state
*ecs
)
8011 gdb_assert (ecs
->event_thread
->ptid
== inferior_ptid
);
8012 gdb_assert (!ecs
->event_thread
->resumed
);
8014 /* Save the pc before execution, to compare with pc after stop. */
8015 ecs
->event_thread
->prev_pc
8016 = regcache_read_pc_protected (get_thread_regcache (ecs
->event_thread
));
8018 if (ecs
->event_thread
->control
.trap_expected
)
8020 struct thread_info
*tp
= ecs
->event_thread
;
8023 fprintf_unfiltered (gdb_stdlog
,
8024 "infrun: %s has trap_expected set, "
8025 "resuming to collect trap\n",
8026 target_pid_to_str (tp
->ptid
).c_str ());
8028 /* We haven't yet gotten our trap, and either: intercepted a
8029 non-signal event (e.g., a fork); or took a signal which we
8030 are supposed to pass through to the inferior. Simply
8032 resume (ecs
->event_thread
->suspend
.stop_signal
);
8034 else if (step_over_info_valid_p ())
8036 /* Another thread is stepping over a breakpoint in-line. If
8037 this thread needs a step-over too, queue the request. In
8038 either case, this resume must be deferred for later. */
8039 struct thread_info
*tp
= ecs
->event_thread
;
8041 if (ecs
->hit_singlestep_breakpoint
8042 || thread_still_needs_step_over (tp
))
8045 fprintf_unfiltered (gdb_stdlog
,
8046 "infrun: step-over already in progress: "
8047 "step-over for %s deferred\n",
8048 target_pid_to_str (tp
->ptid
).c_str ());
8049 thread_step_over_chain_enqueue (tp
);
8054 fprintf_unfiltered (gdb_stdlog
,
8055 "infrun: step-over in progress: "
8056 "resume of %s deferred\n",
8057 target_pid_to_str (tp
->ptid
).c_str ());
8062 struct regcache
*regcache
= get_current_regcache ();
8065 step_over_what step_what
;
8067 /* Either the trap was not expected, but we are continuing
8068 anyway (if we got a signal, the user asked it be passed to
8071 We got our expected trap, but decided we should resume from
8074 We're going to run this baby now!
8076 Note that insert_breakpoints won't try to re-insert
8077 already inserted breakpoints. Therefore, we don't
8078 care if breakpoints were already inserted, or not. */
8080 /* If we need to step over a breakpoint, and we're not using
8081 displaced stepping to do so, insert all breakpoints
8082 (watchpoints, etc.) but the one we're stepping over, step one
8083 instruction, and then re-insert the breakpoint when that step
8086 step_what
= thread_still_needs_step_over (ecs
->event_thread
);
8088 remove_bp
= (ecs
->hit_singlestep_breakpoint
8089 || (step_what
& STEP_OVER_BREAKPOINT
));
8090 remove_wps
= (step_what
& STEP_OVER_WATCHPOINT
);
8092 /* We can't use displaced stepping if we need to step past a
8093 watchpoint. The instruction copied to the scratch pad would
8094 still trigger the watchpoint. */
8096 && (remove_wps
|| !use_displaced_stepping (ecs
->event_thread
)))
8098 set_step_over_info (regcache
->aspace (),
8099 regcache_read_pc (regcache
), remove_wps
,
8100 ecs
->event_thread
->global_num
);
8102 else if (remove_wps
)
8103 set_step_over_info (NULL
, 0, remove_wps
, -1);
8105 /* If we now need to do an in-line step-over, we need to stop
8106 all other threads. Note this must be done before
8107 insert_breakpoints below, because that removes the breakpoint
8108 we're about to step over, otherwise other threads could miss
8110 if (step_over_info_valid_p () && target_is_non_stop_p ())
8111 stop_all_threads ();
8113 /* Stop stepping if inserting breakpoints fails. */
8116 insert_breakpoints ();
8118 catch (const gdb_exception_error
&e
)
8120 exception_print (gdb_stderr
, e
);
8122 clear_step_over_info ();
8126 ecs
->event_thread
->control
.trap_expected
= (remove_bp
|| remove_wps
);
8128 resume (ecs
->event_thread
->suspend
.stop_signal
);
8131 prepare_to_wait (ecs
);
8134 /* Called when we should continue running the inferior, because the
8135 current event doesn't cause a user visible stop. This does the
8136 resuming part; waiting for the next event is done elsewhere. */
8139 keep_going (struct execution_control_state
*ecs
)
8141 if (ecs
->event_thread
->control
.trap_expected
8142 && ecs
->event_thread
->suspend
.stop_signal
== GDB_SIGNAL_TRAP
)
8143 ecs
->event_thread
->control
.trap_expected
= 0;
8145 if (!signal_program
[ecs
->event_thread
->suspend
.stop_signal
])
8146 ecs
->event_thread
->suspend
.stop_signal
= GDB_SIGNAL_0
;
8147 keep_going_pass_signal (ecs
);
8150 /* This function normally comes after a resume, before
8151 handle_inferior_event exits. It takes care of any last bits of
8152 housekeeping, and sets the all-important wait_some_more flag. */
8155 prepare_to_wait (struct execution_control_state
*ecs
)
8158 fprintf_unfiltered (gdb_stdlog
, "infrun: prepare_to_wait\n");
8160 ecs
->wait_some_more
= 1;
8162 /* If the target can't async, emulate it by marking the infrun event
8163 handler such that as soon as we get back to the event-loop, we
8164 immediately end up in fetch_inferior_event again calling
8166 if (!target_can_async_p ())
8167 mark_infrun_async_event_handler ();
8170 /* We are done with the step range of a step/next/si/ni command.
8171 Called once for each n of a "step n" operation. */
8174 end_stepping_range (struct execution_control_state
*ecs
)
8176 ecs
->event_thread
->control
.stop_step
= 1;
8180 /* Several print_*_reason functions to print why the inferior has stopped.
8181 We always print something when the inferior exits, or receives a signal.
8182 The rest of the cases are dealt with later on in normal_stop and
8183 print_it_typical. Ideally there should be a call to one of these
8184 print_*_reason functions functions from handle_inferior_event each time
8185 stop_waiting is called.
8187 Note that we don't call these directly, instead we delegate that to
8188 the interpreters, through observers. Interpreters then call these
8189 with whatever uiout is right. */
8192 print_end_stepping_range_reason (struct ui_out
*uiout
)
8194 /* For CLI-like interpreters, print nothing. */
8196 if (uiout
->is_mi_like_p ())
8198 uiout
->field_string ("reason",
8199 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE
));
8204 print_signal_exited_reason (struct ui_out
*uiout
, enum gdb_signal siggnal
)
8206 annotate_signalled ();
8207 if (uiout
->is_mi_like_p ())
8209 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED
));
8210 uiout
->text ("\nProgram terminated with signal ");
8211 annotate_signal_name ();
8212 uiout
->field_string ("signal-name",
8213 gdb_signal_to_name (siggnal
));
8214 annotate_signal_name_end ();
8216 annotate_signal_string ();
8217 uiout
->field_string ("signal-meaning",
8218 gdb_signal_to_string (siggnal
));
8219 annotate_signal_string_end ();
8220 uiout
->text (".\n");
8221 uiout
->text ("The program no longer exists.\n");
8225 print_exited_reason (struct ui_out
*uiout
, int exitstatus
)
8227 struct inferior
*inf
= current_inferior ();
8228 std::string pidstr
= target_pid_to_str (ptid_t (inf
->pid
));
8230 annotate_exited (exitstatus
);
8233 if (uiout
->is_mi_like_p ())
8234 uiout
->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED
));
8235 std::string exit_code_str
8236 = string_printf ("0%o", (unsigned int) exitstatus
);
8237 uiout
->message ("[Inferior %s (%s) exited with code %pF]\n",
8238 plongest (inf
->num
), pidstr
.c_str (),
8239 string_field ("exit-code", exit_code_str
.c_str ()));
8243 if (uiout
->is_mi_like_p ())
8245 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY
));
8246 uiout
->message ("[Inferior %s (%s) exited normally]\n",
8247 plongest (inf
->num
), pidstr
.c_str ());
8252 print_signal_received_reason (struct ui_out
*uiout
, enum gdb_signal siggnal
)
8254 struct thread_info
*thr
= inferior_thread ();
8258 if (uiout
->is_mi_like_p ())
8260 else if (show_thread_that_caused_stop ())
8264 uiout
->text ("\nThread ");
8265 uiout
->field_string ("thread-id", print_thread_id (thr
));
8267 name
= thr
->name
!= NULL
? thr
->name
: target_thread_name (thr
);
8270 uiout
->text (" \"");
8271 uiout
->field_string ("name", name
);
8276 uiout
->text ("\nProgram");
8278 if (siggnal
== GDB_SIGNAL_0
&& !uiout
->is_mi_like_p ())
8279 uiout
->text (" stopped");
8282 uiout
->text (" received signal ");
8283 annotate_signal_name ();
8284 if (uiout
->is_mi_like_p ())
8286 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED
));
8287 uiout
->field_string ("signal-name", gdb_signal_to_name (siggnal
));
8288 annotate_signal_name_end ();
8290 annotate_signal_string ();
8291 uiout
->field_string ("signal-meaning", gdb_signal_to_string (siggnal
));
8293 struct regcache
*regcache
= get_current_regcache ();
8294 struct gdbarch
*gdbarch
= regcache
->arch ();
8295 if (gdbarch_report_signal_info_p (gdbarch
))
8296 gdbarch_report_signal_info (gdbarch
, uiout
, siggnal
);
8298 annotate_signal_string_end ();
8300 uiout
->text (".\n");
8304 print_no_history_reason (struct ui_out
*uiout
)
8306 uiout
->text ("\nNo more reverse-execution history.\n");
8309 /* Print current location without a level number, if we have changed
8310 functions or hit a breakpoint. Print source line if we have one.
8311 bpstat_print contains the logic deciding in detail what to print,
8312 based on the event(s) that just occurred. */
8315 print_stop_location (struct target_waitstatus
*ws
)
8318 enum print_what source_flag
;
8319 int do_frame_printing
= 1;
8320 struct thread_info
*tp
= inferior_thread ();
8322 bpstat_ret
= bpstat_print (tp
->control
.stop_bpstat
, ws
->kind
);
8326 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
8327 should) carry around the function and does (or should) use
8328 that when doing a frame comparison. */
8329 if (tp
->control
.stop_step
8330 && frame_id_eq (tp
->control
.step_frame_id
,
8331 get_frame_id (get_current_frame ()))
8332 && (tp
->control
.step_start_function
8333 == find_pc_function (tp
->suspend
.stop_pc
)))
8335 /* Finished step, just print source line. */
8336 source_flag
= SRC_LINE
;
8340 /* Print location and source line. */
8341 source_flag
= SRC_AND_LOC
;
8344 case PRINT_SRC_AND_LOC
:
8345 /* Print location and source line. */
8346 source_flag
= SRC_AND_LOC
;
8348 case PRINT_SRC_ONLY
:
8349 source_flag
= SRC_LINE
;
8352 /* Something bogus. */
8353 source_flag
= SRC_LINE
;
8354 do_frame_printing
= 0;
8357 internal_error (__FILE__
, __LINE__
, _("Unknown value."));
8360 /* The behavior of this routine with respect to the source
8362 SRC_LINE: Print only source line
8363 LOCATION: Print only location
8364 SRC_AND_LOC: Print location and source line. */
8365 if (do_frame_printing
)
8366 print_stack_frame (get_selected_frame (NULL
), 0, source_flag
, 1);
8372 print_stop_event (struct ui_out
*uiout
, bool displays
)
8374 struct target_waitstatus last
;
8375 struct thread_info
*tp
;
8377 get_last_target_status (nullptr, nullptr, &last
);
8380 scoped_restore save_uiout
= make_scoped_restore (¤t_uiout
, uiout
);
8382 print_stop_location (&last
);
8384 /* Display the auto-display expressions. */
8389 tp
= inferior_thread ();
8390 if (tp
->thread_fsm
!= NULL
8391 && tp
->thread_fsm
->finished_p ())
8393 struct return_value_info
*rv
;
8395 rv
= tp
->thread_fsm
->return_value ();
8397 print_return_value (uiout
, rv
);
8404 maybe_remove_breakpoints (void)
8406 if (!breakpoints_should_be_inserted_now () && target_has_execution
)
8408 if (remove_breakpoints ())
8410 target_terminal::ours_for_output ();
8411 printf_filtered (_("Cannot remove breakpoints because "
8412 "program is no longer writable.\nFurther "
8413 "execution is probably impossible.\n"));
/* The execution context that just caused a normal stop.  Captured so
   that, after running the user's stop hook, we can tell whether the
   hook resumed/switched the target and the stop notification is
   therefore stale.  */

struct stop_context
{
  stop_context ();
  ~stop_context ();

  DISABLE_COPY_AND_ASSIGN (stop_context);

  /* Return true if the current context no longer matches the saved
     stop context.  */
  bool changed () const;

  /* The stop ID at the time of capture.  */
  ULONGEST stop_id;

  /* The event PTID.  */
  ptid_t ptid;

  /* If stopped for a thread event, this is the thread that caused the
     stop.  NULL otherwise.  Holds a strong reference.  */
  struct thread_info *thread;

  /* The inferior that caused the stop.  */
  int inf_num;
};
8447 stop_context::stop_context ()
8449 stop_id
= get_stop_id ();
8450 ptid
= inferior_ptid
;
8451 inf_num
= current_inferior ()->num
;
8453 if (inferior_ptid
!= null_ptid
)
8455 /* Take a strong reference so that the thread can't be deleted
8457 thread
= inferior_thread ();
/* Release a stop context previously created with save_stop_context.
   Releases the strong reference to the thread as well.  */

stop_context::~stop_context ()
{
  if (thread != NULL)
    thread->decref ();
}
/* Return true if the current context no longer matches the saved stop
   context.  */

bool
stop_context::changed () const
{
  /* Any of: a different thread, a different inferior, the saved thread
     having resumed, or a newer stop having occurred, invalidates the
     saved context.  */
  if (ptid != inferior_ptid)
    return true;
  if (inf_num != current_inferior ()->num)
    return true;
  if (thread != NULL && thread->state != THREAD_STOPPED)
    return true;
  if (get_stop_id () != stop_id)
    return true;
  return false;
}
/* See infrun.h.  Present a normal stop to the user: finish thread
   states, notify interpreters/observers, run the stop hook, and select
   the frame to report.  Returns nonzero if the stop hook changed the
   context (in which case the stop notification was suppressed).  */

int
normal_stop (void)
{
  struct target_waitstatus last;

  get_last_target_status (nullptr, nullptr, &last);

  new_stop_id ();

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */

  ptid_t finish_ptid = null_ptid;

  if (!non_stop)
    finish_ptid = minus_one_ptid;
  else if (last.kind == TARGET_WAITKIND_SIGNALLED
	   || last.kind == TARGET_WAITKIND_EXITED)
    {
      /* On some targets, we may still have live threads in the
	 inferior when we get a process exit event.  E.g., for
	 "checkpoint", when the current checkpoint/fork exits,
	 linux-fork.c automatically switches to another fork from
	 within target_mourn_inferior.  */
      if (inferior_ptid != null_ptid)
	finish_ptid = ptid_t (inferior_ptid.pid ());
    }
  else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
    finish_ptid = inferior_ptid;

  gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
  if (finish_ptid != null_ptid)
    {
      maybe_finish_thread_state.emplace
	(user_visible_resume_target (finish_ptid), finish_ptid);
    }

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop
      && previous_inferior_ptid != inferior_ptid
      && target_has_execution
      && last.kind != TARGET_WAITKIND_SIGNALLED
      && last.kind != TARGET_WAITKIND_EXITED
      && last.kind != TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	{
	  target_terminal::ours_for_output ();
	  printf_filtered (_("[Switching to %s]\n"),
			   target_pid_to_str (inferior_ptid).c_str ());
	  annotate_thread_changed ();
	}
      previous_inferior_ptid = inferior_ptid;
    }

  if (last.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	if (current_ui->prompt_state == PROMPT_BLOCKED)
	  {
	    target_terminal::ours_for_output ();
	    printf_filtered (_("No unwaited-for children left.\n"));
	  }
    }

  /* Note: this depends on the update_thread_list call above.  */
  maybe_remove_breakpoints ();

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  SWITCH_THRU_ALL_UIS ()
    {
      async_enable_stdin ();
    }

  /* Let the user/frontend see the threads as stopped.  */
  maybe_finish_thread_state.reset ();

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.  Handle the case where the
     dummy call is returning after being stopped.  E.g. the dummy call
     previously hit a breakpoint.  (If the dummy call returns
     normally, we won't reach here.)  Do this before the stop hook is
     run, so that it doesn't get to see the temporary dummy frame,
     which is not where we'll present the stop.  */
  if (has_stack_frames ())
    {
      if (stop_stack_dummy == STOP_STACK_DUMMY)
	{
	  /* Pop the empty frame that contains the stack dummy.  This
	     also restores inferior state prior to the call (struct
	     infcall_suspend_state).  */
	  struct frame_info *frame = get_current_frame ();

	  gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
	  frame_pop (frame);
	  /* frame_pop calls reinit_frame_cache as the last thing it
	     does which means there's now no selected frame.  */
	}

      select_frame (get_current_frame ());

      /* Set the current source location.  */
      set_current_sal_from_frame (get_current_frame ());
    }

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  if (stop_command != NULL)
    {
      stop_context saved_context;

      try
	{
	  execute_cmd_pre_hook (stop_command);
	}
      catch (const gdb_exception &ex)
	{
	  exception_fprintf (gdb_stderr, ex,
			     "Error while running hook_stop:\n");
	}

      /* If the stop hook resumes the target, then there's no point in
	 trying to notify about the previous stop; its context is
	 gone.  Likewise if the command switches thread or inferior --
	 the observers would print a stop for the wrong
	 thread/inferior.  */
      if (saved_context.changed ())
	return 1;
    }

  /* Notify observers about the stop.  This is where the interpreters
     print the stop event.  */
  if (inferior_ptid != null_ptid)
    gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
					stop_print_frame);
  else
    gdb::observers::normal_stop.notify (NULL, stop_print_frame);

  annotate_stopped ();

  if (target_has_execution)
    {
      if (last.kind != TARGET_WAITKIND_SIGNALLED
	  && last.kind != TARGET_WAITKIND_EXITED
	  && last.kind != TARGET_WAITKIND_NO_RESUMED)
	/* Delete the breakpoint we stopped at, if it wants to be deleted.
	   Delete any breakpoint that is to be deleted at the next stop.  */
	breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  /* Try to get rid of automatically added inferiors that are no
     longer needed.  Keeping those around slows down things linearly.
     Note that this never removes the current inferior.  */
  prune_inferiors ();

  return 0;
}
/* Return nonzero if GDB should stop when the inferior receives
   signal SIGNO (index into GDB's internal signal numbering).  */

int
signal_stop_state (int signo)
{
  return signal_stop[signo];
}
/* Return nonzero if GDB should print a message when the inferior
   receives signal SIGNO.  */

int
signal_print_state (int signo)
{
  return signal_print[signo];
}
/* Return nonzero if signal SIGNO should be passed through to the
   inferior program.  */

int
signal_pass_state (int signo)
{
  return signal_program[signo];
}
/* Recompute the cached signal_pass entry for SIGNO from the stop,
   print, program and catch tables.  SIGNO == -1 means recompute the
   whole table.  A signal is passed to the target without stopping
   only when GDB neither stops, prints, nor catches it, and the user
   wants the program to see it.  */

void
signal_cache_update (int signo)
{
  if (signo == -1)
    {
      for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
	signal_cache_update (signo);

      return;
    }

  signal_pass[signo] = (signal_stop[signo] == 0
			&& signal_print[signo] == 0
			&& signal_program[signo] == 1
			&& signal_catch[signo] == 0);
}
/* Set the "stop" disposition for signal SIGNO to STATE, refresh the
   pass cache, and return the previous setting.  */

int
signal_stop_update (int signo, int state)
{
  int ret = signal_stop[signo];

  signal_stop[signo] = state;
  signal_cache_update (signo);
  return ret;
}
/* Set the "print" disposition for signal SIGNO to STATE, refresh the
   pass cache, and return the previous setting.  */

int
signal_print_update (int signo, int state)
{
  int ret = signal_print[signo];

  signal_print[signo] = state;
  signal_cache_update (signo);
  return ret;
}
/* Set the "pass to program" disposition for signal SIGNO to STATE,
   refresh the pass cache, and return the previous setting.  */

int
signal_pass_update (int signo, int state)
{
  int ret = signal_program[signo];

  signal_program[signo] = state;
  signal_cache_update (signo);
  return ret;
}
/* Update the global 'signal_catch' from INFO and notify the
   target.  INFO is indexed by GDB signal number; a nonzero entry
   means the signal is caught by a catchpoint.  */

void
signal_catch_update (const unsigned int *info)
{
  int i;

  for (i = 0; i < GDB_SIGNAL_LAST; ++i)
    signal_catch[i] = info[i] > 0;
  /* Recompute the entire pass table, then push it to the target.  */
  signal_cache_update (-1);
  target_pass_signals (signal_pass);
}
/* Print the column headers for the "info signals" table.  */

static void
sig_print_header (void)
{
  printf_filtered (_("Signal        Stop\tPrint\tPass "
		     "to program\tDescription\n"));
}
8772 sig_print_info (enum gdb_signal oursig
)
8774 const char *name
= gdb_signal_to_name (oursig
);
8775 int name_padding
= 13 - strlen (name
);
8777 if (name_padding
<= 0)
8780 printf_filtered ("%s", name
);
8781 printf_filtered ("%*.*s ", name_padding
, name_padding
, " ");
8782 printf_filtered ("%s\t", signal_stop
[oursig
] ? "Yes" : "No");
8783 printf_filtered ("%s\t", signal_print
[oursig
] ? "Yes" : "No");
8784 printf_filtered ("%s\t\t", signal_program
[oursig
] ? "Yes" : "No");
8785 printf_filtered ("%s\n", gdb_signal_to_string (oursig
));
/* Specify how various signals in the inferior should be handled.
   Implements the "handle" command.  ARGS is a mix of signal
   names/numbers/ranges and action keywords ("stop", "nostop",
   "print", "noprint", "pass"/"noignore", "nopass"/"ignore", "all");
   actions accumulate and apply to all signals named so far.  */

static void
handle_command (const char *args, int from_tty)
{
  int digits, wordlen;
  int sigfirst, siglast;
  enum gdb_signal oursig;
  int allsigs;

  if (args == NULL)
    {
      error_no_arg (_("signal to handle"));
    }

  /* Allocate and zero an array of flags for which signals to handle.  */

  const size_t nsigs = GDB_SIGNAL_LAST;
  unsigned char sigs[nsigs] {};

  /* Break the command line up into args.  */

  gdb_argv built_argv (args);

  /* Walk through the args, looking for signal oursigs, signal names, and
     actions.  Signal numbers and signal names may be interspersed with
     actions, with the actions being performed for all signals cumulatively
     specified.  Signal ranges can be specified as <LOW>-<HIGH>.  */

  for (char *arg : built_argv)
    {
      wordlen = strlen (arg);
      for (digits = 0; isdigit (arg[digits]); digits++)
	{;
	}
      allsigs = 0;
      sigfirst = siglast = -1;

      /* Keyword matching is by unambiguous prefix; the minimum prefix
	 length distinguishes e.g. "no..." keywords from each other.  */
      if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
	{
	  /* Apply action to all signals except those used by the
	     debugger.  Silently skip those.  */
	  allsigs = 1;
	  sigfirst = 0;
	  siglast = nsigs - 1;
	}
      else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_stop);
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
	{
	  /* "noprint" implies "nostop".  */
	  UNSET_SIGS (nsigs, sigs, signal_print);
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (digits > 0)
	{
	  /* It is numeric.  The numeric signal refers to our own
	     internal signal numbering from target.h, not to host/target
	     signal  number.  This is a feature; users really should be
	     using symbolic names anyway, and the common ones like
	     SIGHUP, SIGINT, SIGALRM, etc. will work right anyway.  */

	  sigfirst = siglast = (int)
	    gdb_signal_from_command (atoi (arg));
	  if (arg[digits] == '-')
	    {
	      siglast = (int)
		gdb_signal_from_command (atoi (arg + digits + 1));
	    }
	  if (sigfirst > siglast)
	    {
	      /* Bet he didn't figure we'd think of this case...  */
	      std::swap (sigfirst, siglast);
	    }
	}
      else
	{
	  oursig = gdb_signal_from_name (arg);
	  if (oursig != GDB_SIGNAL_UNKNOWN)
	    {
	      sigfirst = siglast = (int) oursig;
	    }
	  else
	    {
	      /* Not a number and not a recognized flag word => complain.  */
	      error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
	    }
	}

      /* If any signal numbers or symbol names were found, set flags for
	 which signals to apply actions to.  */

      for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
	{
	  switch ((enum gdb_signal) signum)
	    {
	    case GDB_SIGNAL_TRAP:
	    case GDB_SIGNAL_INT:
	      /* These are used by GDB itself; require confirmation
		 before changing them, unless "all" was given (which
		 silently skips them).  */
	      if (!allsigs && !sigs[signum])
		{
		  if (query (_("%s is used by the debugger.\n\
Are you sure you want to change it? "),
			     gdb_signal_to_name ((enum gdb_signal) signum)))
		    {
		      sigs[signum] = 1;
		    }
		  else
		    printf_unfiltered (_("Not confirmed, unchanged.\n"));
		}
	      break;
	    case GDB_SIGNAL_0:
	    case GDB_SIGNAL_DEFAULT:
	    case GDB_SIGNAL_UNKNOWN:
	      /* Make sure that "all" doesn't print these.  */
	      break;
	    default:
	      sigs[signum] = 1;
	      break;
	    }
	}
    }

  for (int signum = 0; signum < nsigs; signum++)
    if (sigs[signum])
      {
	/* At least one signal was touched: push the new tables to the
	   target and, interactively, show the resulting settings.  */
	signal_cache_update (-1);
	target_pass_signals (signal_pass);
	target_program_signals (signal_program);

	if (from_tty)
	  {
	    /* Show the results.  */
	    sig_print_header ();
	    for (; signum < nsigs; signum++)
	      if (sigs[signum])
		sig_print_info ((enum gdb_signal) signum);
	  }

	break;
      }
}
/* Complete the "handle" command.  Offers both signal names and the
   action keywords accepted by handle_command.  */

static void
handle_completer (struct cmd_list_element *ignore,
		  completion_tracker &tracker,
		  const char *text, const char *word)
{
  /* NOTE(review): keyword table reconstructed; confirm it matches the
     action words accepted by handle_command.  */
  static const char * const keywords[] =
    {
      "all",
      "stop",
      "ignore",
      "print",
      "pass",
      "nostop",
      "noignore",
      "noprint",
      "nopass",
      NULL,
    };

  signal_completer (ignore, tracker, text, word);
  complete_on_enum (tracker, keywords, word, word);
}
/* Convert a user-supplied numeric signal NUM (GDB's internal
   numbering) to an enum gdb_signal.  Only 1-15 are accepted, for
   compatibility with old versions of GDB; anything else errors.  */

enum gdb_signal
gdb_signal_from_command (int num)
{
  if (num >= 1 && num <= 15)
    return (enum gdb_signal) num;
  error (_("Only signals 1-15 are valid as numeric signals.\n\
Use \"info signals\" for a list of symbolic signals."));
}
/* Print current contents of the tables set by the handle command.
   It is possible we should just be printing signals actually used
   by the current target (but for things to work right when switching
   targets, all signals should be in the signal tables).  Implements
   "info signals" / "info handle".  With SIGNUM_EXP, print just that
   signal; otherwise print the whole table.  */

static void
info_signals_command (const char *signum_exp, int from_tty)
{
  enum gdb_signal oursig;

  sig_print_header ();

  if (signum_exp)
    {
      /* First see if this is a symbol name.  */
      oursig = gdb_signal_from_name (signum_exp);
      if (oursig == GDB_SIGNAL_UNKNOWN)
	{
	  /* No, try numeric.  */
	  oursig =
	    gdb_signal_from_command (parse_and_eval_long (signum_exp));
	}
      sig_print_info (oursig);
      return;
    }

  printf_filtered ("\n");
  /* These ugly casts brought to you by the native VAX compiler.  */
  for (oursig = GDB_SIGNAL_FIRST;
       (int) oursig < (int) GDB_SIGNAL_LAST;
       oursig = (enum gdb_signal) ((int) oursig + 1))
    {
      QUIT;

      /* Skip the pseudo-signals; they are not meaningful rows.  */
      if (oursig != GDB_SIGNAL_UNKNOWN
	  && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
	sig_print_info (oursig);
    }

  printf_filtered (_("\nUse the \"handle\" command "
		     "to change these tables.\n"));
}
9033 /* The $_siginfo convenience variable is a bit special. We don't know
9034 for sure the type of the value until we actually have a chance to
9035 fetch the data. The type can change depending on gdbarch, so it is
9036 also dependent on which thread you have selected.
9038 1. making $_siginfo be an internalvar that creates a new value on
9041 2. making the value of $_siginfo be an lval_computed value. */
/* This function implements the lval_computed support for reading a
   $_siginfo value: fetch the raw siginfo object from the target into
   V's contents buffer.  */

static void
siginfo_value_read (struct value *v)
{
  LONGEST transferred;

  /* If we can access registers, so can we access $_siginfo.  Likewise
     vice versa.  */
  validate_registers_access ();

  transferred =
    target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO,
		 NULL,
		 value_contents_all_raw (v),
		 value_offset (v),
		 TYPE_LENGTH (value_type (v)));

  /* A short read means the target could not supply the whole
     object.  */
  if (transferred != TYPE_LENGTH (value_type (v)))
    error (_("Unable to read siginfo"));
}
/* This function implements the lval_computed support for writing a
   $_siginfo value: push FROMVAL's contents back to the target's
   siginfo object.  */

static void
siginfo_value_write (struct value *v, struct value *fromval)
{
  LONGEST transferred;

  /* If we can access registers, so can we access $_siginfo.  Likewise
     vice versa.  */
  validate_registers_access ();

  transferred = target_write (current_top_target (),
			      TARGET_OBJECT_SIGNAL_INFO,
			      NULL,
			      value_contents_all_raw (fromval),
			      value_offset (v),
			      TYPE_LENGTH (value_type (fromval)));

  if (transferred != TYPE_LENGTH (value_type (fromval)))
    error (_("Unable to write siginfo"));
}
/* Computed-lvalue hooks backing the $_siginfo convenience variable;
   only read and write are needed.  */

static const struct lval_funcs siginfo_value_funcs =
  {
    siginfo_value_read,
    siginfo_value_write
  };
/* Return a new value with the correct type for the siginfo object of
   the current thread using architecture GDBARCH.  Return a void value
   if there's no object available.  */

static struct value *
siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
		    void *ignore)
{
  if (target_has_stack
      && inferior_ptid != null_ptid
      && gdbarch_get_siginfo_type_p (gdbarch))
    {
      struct type *type = gdbarch_get_siginfo_type (gdbarch);

      /* Lazy computed value: contents are fetched on demand via
	 siginfo_value_funcs.  */
      return allocate_computed_value (type, &siginfo_value_funcs, NULL);
    }

  return allocate_value (builtin_type (gdbarch)->builtin_void);
}
/* infcall_suspend_state contains state about the program itself like its
   registers and any signal it received when it last stopped.
   This state must be restored regardless of how the inferior function call
   ends (either successfully, or after it hits a breakpoint or signal)
   if the program is to properly continue where it left off.  */

class infcall_suspend_state
{
public:
  /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
     once the inferior function call has finished.  */
  infcall_suspend_state (struct gdbarch *gdbarch,
			 const struct thread_info *tp,
			 struct regcache *regcache)
    : m_thread_suspend (tp->suspend),
      m_registers (new readonly_detached_regcache (*regcache))
  {
    gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;

    if (gdbarch_get_siginfo_type_p (gdbarch))
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);
	size_t len = TYPE_LENGTH (type);

	siginfo_data.reset ((gdb_byte *) xmalloc (len));

	if (target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
			 siginfo_data.get (), 0, len) != len)
	  {
	    /* Errors ignored.  A missing siginfo simply means we won't
	       restore one afterwards.  */
	    siginfo_data.reset (nullptr);
	  }
      }

    if (siginfo_data)
      {
	m_siginfo_gdbarch = gdbarch;
	m_siginfo_data = std::move (siginfo_data);
      }
  }

  /* Return a pointer to the stored register state.  */

  readonly_detached_regcache *registers () const
  {
    return m_registers.get ();
  }

  /* Restores the stored state into GDBARCH, TP, and REGCACHE.  */

  void restore (struct gdbarch *gdbarch,
		struct thread_info *tp,
		struct regcache *regcache) const
  {
    tp->suspend = m_thread_suspend;

    /* Only restore siginfo captured for this same architecture; the
       byte layout is gdbarch-specific.  */
    if (m_siginfo_gdbarch == gdbarch)
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);

	/* Errors ignored.  */
	target_write (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
		      m_siginfo_data.get (), 0, TYPE_LENGTH (type));
      }

    /* The inferior can be gone if the user types "print exit(0)"
       (and perhaps other times).  */
    if (target_has_execution)
      /* NB: The register write goes through to the target.  */
      regcache->restore (registers ());
  }

private:
  /* How the current thread stopped before the inferior function call was
     executed.  */
  struct thread_suspend_state m_thread_suspend;

  /* The registers before the inferior function call was executed.  */
  std::unique_ptr<readonly_detached_regcache> m_registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *m_siginfo_gdbarch = nullptr;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For different gdbarch the
     content would be invalid.  */
  gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
};
/* Capture the current program state (registers, stop signal, siginfo)
   so it can be restored after an inferior function call.  Ownership of
   the state is returned to the caller via unique pointer.  */

infcall_suspend_state_up
save_infcall_suspend_state ()
{
  struct thread_info *tp = inferior_thread ();
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();

  infcall_suspend_state_up inf_state
    (new struct infcall_suspend_state (gdbarch, tp, regcache));

  /* Having saved the current state, adjust the thread state, discarding
     any stop signal information.  The stop signal is not useful when
     starting an inferior function call, and run_inferior_call will not use
     the signal due to its `proceed' call with GDB_SIGNAL_0.  */
  tp->suspend.stop_signal = GDB_SIGNAL_0;

  return inf_state;
}
/* Restore inferior session state to INF_STATE.  Consumes (frees)
   INF_STATE.  */

void
restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  struct thread_info *tp = inferior_thread ();
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();

  inf_state->restore (gdbarch, tp, regcache);
  discard_infcall_suspend_state (inf_state);
}
/* Free INF_STATE without restoring anything from it.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
/* Return the register state captured in INF_STATE.  The returned
   regcache remains owned by INF_STATE.  */

readonly_detached_regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers ();
}
/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  struct thread_control_state thread_control;
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  int stopped_by_random_signal = 0;

  /* ID if the selected frame when the inferior function call was made.  */
  struct frame_id selected_frame_id {};
};
/* Save all of the information associated with the inferior<==>gdb
   connection.  The returned state owns a copy of the control state;
   the thread's step/exception resume breakpoints are transferred to
   it and cleared from the thread.  */

infcall_control_state_up
save_infcall_control_state ()
{
  infcall_control_state_up inf_status (new struct infcall_control_state);
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  inf_status->thread_control = tp->control;
  inf_status->inferior_control = inf->control;

  /* Ownership of the resume breakpoints moved into INF_STATUS above;
     clear them from the live thread.  */
  tp->control.step_resume_breakpoint = NULL;
  tp->control.exception_resume_breakpoint = NULL;

  /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
     chain.  If caller's caller is walking the chain, they'll be happier if we
     hand them back the original chain when restore_infcall_control_state is
     called.  */
  tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);

  /* Other fields:  */
  inf_status->stop_stack_dummy = stop_stack_dummy;
  inf_status->stopped_by_random_signal = stopped_by_random_signal;

  inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));

  return inf_status;
}
/* Re-select the frame identified by FID, if it still exists; warn and
   leave the selection unchanged otherwise.  */

static void
restore_selected_frame (const frame_id &fid)
{
  frame_info *frame = frame_find_by_id (fid);

  /* If inf_status->selected_frame_id is NULL, there was no previously
     selected frame.  */
  if (frame == NULL)
    {
      warning (_("Unable to restore previously selected frame."));
      return;
    }

  select_frame (frame);
}
/* Restore inferior session state to INF_STATUS.  Consumes (frees)
   INF_STATUS.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* Any resume breakpoints installed during the call are obsolete;
     schedule them for deletion at the next stop before overwriting
     the control state below.  */
  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;

  if (target_has_stack)
    {
      /* The point of the try/catch is that if the stack is clobbered,
	 walking the stack might encounter a garbage pointer and
	 error() trying to dereference it.  */
      try
	{
	  restore_selected_frame (inf_status->selected_frame_id);
	}
      catch (const gdb_exception_error &ex)
	{
	  exception_fprintf (gdb_stderr, ex,
			     "Unable to restore previously selected frame:\n");
	  /* Error in restoring the selected frame.  Select the
	     innermost frame.  */
	  select_frame (get_current_frame ());
	}
    }

  delete inf_status;
}
/* Free INF_STATUS without restoring it, scheduling any resume
   breakpoints it owns for deletion at the next stop.  */

void
discard_infcall_control_state (struct infcall_control_state *inf_status)
{
  if (inf_status->thread_control.step_resume_breakpoint)
    inf_status->thread_control.step_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  if (inf_status->thread_control.exception_resume_breakpoint)
    inf_status->thread_control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* See save_infcall_control_state for info on stop_bpstat.  */
  bpstat_clear (&inf_status->thread_control.stop_bpstat);

  delete inf_status;
}
/* See infrun.h.  Clear the $_exitsignal and $_exitcode convenience
   variables, e.g. when a new inferior is started.  */

void
clear_exit_convenience_vars (void)
{
  clear_internalvar (lookup_internalvar ("_exitsignal"));
  clear_internalvar (lookup_internalvar ("_exitcode"));
}
/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (returns error unless target implements to_set_exec_direction method).  */

enum exec_direction_kind execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
/* The string form of the setting, kept in sync with
   execution_direction by set_exec_direction_func.  */
static const char *exec_direction = exec_forward;
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  NULL
};
/* "set exec-direction" handler: translate the string setting in
   exec_direction into execution_direction, rejecting the change if
   the target cannot execute in reverse.  */

static void
set_exec_direction_func (const char *args, int from_tty,
			 struct cmd_list_element *cmd)
{
  if (target_can_execute_reverse)
    {
      if (!strcmp (exec_direction, exec_forward))
	execution_direction = EXEC_FORWARD;
      else if (!strcmp (exec_direction, exec_reverse))
	execution_direction = EXEC_REVERSE;
    }
  else
    {
      /* Roll the string setting back to "forward" before erroring so
	 the displayed setting stays consistent.  */
      exec_direction = exec_forward;
      error (_("Target does not support this operation."));
    }
}
/* "show exec-direction" handler: print the current execution
   direction.  */

static void
show_exec_direction_func (struct ui_file *out, int from_tty,
			  struct cmd_list_element *cmd, const char *value)
{
  switch (execution_direction) {
  case EXEC_FORWARD:
    fprintf_filtered (out, _("Forward.\n"));
    break;
  case EXEC_REVERSE:
    fprintf_filtered (out, _("Reverse.\n"));
    break;
  default:
    internal_error (__FILE__, __LINE__,
		    _("bogus execution_direction value: %d"),
		    (int) execution_direction);
  }
}
/* "show schedule-multiple" handler: report whether resuming one
   inferior also resumes threads of all other processes.  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Resuming the execution of threads "
			    "of all processes is %s.\n"), value);
}
/* Implementation of `siginfo' variable.  Only the value-creation hook
   is needed; the value itself handles reads/writes lazily.  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  NULL,
  NULL
};
/* Callback for infrun's target events source.  This is marked when a
   thread has a pending status to process.  */

static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT);
}
/* Verify that when two threads with the same ptid exist (from two different
   targets) and one of them changes ptid, we only update inferior_ptid if
   it is appropriate.  */

static void
infrun_thread_ptid_changed ()
{
  gdbarch *arch = current_inferior ()->gdbarch;

  /* The thread which inferior_ptid represents changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    /* Two mock targets, each with one inferior/thread of the same
       ptid.  Link target2's inferior after target1's so both are on
       the inferior list.  */
    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);
    target2.mock_inferior.next = &target1.mock_inferior;

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target1.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    /* inferior_ptid tracked the change because the changing thread
       belongs to the current target.  */
    gdb_assert (inferior_ptid == new_ptid);
  }

  /* A thread with the same ptid as inferior_ptid, but from another target,
     changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);
    target2.mock_inferior.next = &target1.mock_inferior;

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target2.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    /* inferior_ptid must NOT follow the change: the changing thread
       belongs to target1, not the current target (target2).  */
    gdb_assert (inferior_ptid == old_ptid);
  }
}
9525 } /* namespace selftests */
9527 void _initialize_infrun ();
9529 _initialize_infrun ()
9531 struct cmd_list_element
*c
;
9533 /* Register extra event sources in the event loop. */
9534 infrun_async_inferior_event_token
9535 = create_async_event_handler (infrun_async_inferior_event_handler
, NULL
);
9537 add_info ("signals", info_signals_command
, _("\
9538 What debugger does when program gets various signals.\n\
9539 Specify a signal as argument to print info on that signal only."));
9540 add_info_alias ("handle", "signals", 0);
9542 c
= add_com ("handle", class_run
, handle_command
, _("\
9543 Specify how to handle signals.\n\
9544 Usage: handle SIGNAL [ACTIONS]\n\
9545 Args are signals and actions to apply to those signals.\n\
9546 If no actions are specified, the current settings for the specified signals\n\
9547 will be displayed instead.\n\
9549 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
9550 from 1-15 are allowed for compatibility with old versions of GDB.\n\
9551 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
9552 The special arg \"all\" is recognized to mean all signals except those\n\
9553 used by the debugger, typically SIGTRAP and SIGINT.\n\
9555 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
9556 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
9557 Stop means reenter debugger if this signal happens (implies print).\n\
9558 Print means print a message if this signal happens.\n\
9559 Pass means let program see this signal; otherwise program doesn't know.\n\
9560 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
9561 Pass and Stop may be combined.\n\
9563 Multiple signals may be specified. Signal numbers and signal names\n\
9564 may be interspersed with actions, with the actions being performed for\n\
9565 all signals cumulatively specified."));
9566 set_cmd_completer (c
, handle_completer
);
9569 stop_command
= add_cmd ("stop", class_obscure
,
9570 not_just_help_class_command
, _("\
9571 There is no `stop' command, but you can set a hook on `stop'.\n\
9572 This allows you to set a list of commands to be run each time execution\n\
9573 of the program stops."), &cmdlist
);
9575 add_setshow_zuinteger_cmd ("infrun", class_maintenance
, &debug_infrun
, _("\
9576 Set inferior debugging."), _("\
9577 Show inferior debugging."), _("\
9578 When non-zero, inferior specific debugging is enabled."),
9581 &setdebuglist
, &showdebuglist
);
9583 add_setshow_boolean_cmd ("displaced", class_maintenance
,
9584 &debug_displaced
, _("\
9585 Set displaced stepping debugging."), _("\
9586 Show displaced stepping debugging."), _("\
9587 When non-zero, displaced stepping specific debugging is enabled."),
9589 show_debug_displaced
,
9590 &setdebuglist
, &showdebuglist
);
9592 add_setshow_boolean_cmd ("non-stop", no_class
,
9594 Set whether gdb controls the inferior in non-stop mode."), _("\
9595 Show whether gdb controls the inferior in non-stop mode."), _("\
9596 When debugging a multi-threaded program and this setting is\n\
9597 off (the default, also called all-stop mode), when one thread stops\n\
9598 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
9599 all other threads in the program while you interact with the thread of\n\
9600 interest. When you continue or step a thread, you can allow the other\n\
9601 threads to run, or have them remain stopped, but while you inspect any\n\
9602 thread's state, all threads stop.\n\
9604 In non-stop mode, when one thread stops, other threads can continue\n\
9605 to run freely. You'll be able to step each thread independently,\n\
9606 leave it stopped or free to run as needed."),
9612 for (size_t i
= 0; i
< GDB_SIGNAL_LAST
; i
++)
9615 signal_print
[i
] = 1;
9616 signal_program
[i
] = 1;
9617 signal_catch
[i
] = 0;
/* Signals caused by debugger's own actions should not be given to
   the program afterwards.

   Do not deliver GDB_SIGNAL_TRAP by default, except when the user
   explicitly specifies that it should be delivered to the target
   program.  Typically, that would occur when a user is debugging a
   target monitor on a simulator: the target monitor sets a
   breakpoint; the simulator encounters this breakpoint and halts
   the simulation handing control to GDB; GDB, noting that the stop
   address doesn't map to any known breakpoint, returns control back
   to the simulator; the simulator then delivers the hardware
   equivalent of a GDB_SIGNAL_TRAP to the program being
   debugged.  */
9633 signal_program
[GDB_SIGNAL_TRAP
] = 0;
9634 signal_program
[GDB_SIGNAL_INT
] = 0;
9636 /* Signals that are not errors should not normally enter the debugger. */
9637 signal_stop
[GDB_SIGNAL_ALRM
] = 0;
9638 signal_print
[GDB_SIGNAL_ALRM
] = 0;
9639 signal_stop
[GDB_SIGNAL_VTALRM
] = 0;
9640 signal_print
[GDB_SIGNAL_VTALRM
] = 0;
9641 signal_stop
[GDB_SIGNAL_PROF
] = 0;
9642 signal_print
[GDB_SIGNAL_PROF
] = 0;
9643 signal_stop
[GDB_SIGNAL_CHLD
] = 0;
9644 signal_print
[GDB_SIGNAL_CHLD
] = 0;
9645 signal_stop
[GDB_SIGNAL_IO
] = 0;
9646 signal_print
[GDB_SIGNAL_IO
] = 0;
9647 signal_stop
[GDB_SIGNAL_POLL
] = 0;
9648 signal_print
[GDB_SIGNAL_POLL
] = 0;
9649 signal_stop
[GDB_SIGNAL_URG
] = 0;
9650 signal_print
[GDB_SIGNAL_URG
] = 0;
9651 signal_stop
[GDB_SIGNAL_WINCH
] = 0;
9652 signal_print
[GDB_SIGNAL_WINCH
] = 0;
9653 signal_stop
[GDB_SIGNAL_PRIO
] = 0;
9654 signal_print
[GDB_SIGNAL_PRIO
] = 0;
9656 /* These signals are used internally by user-level thread
9657 implementations. (See signal(5) on Solaris.) Like the above
9658 signals, a healthy program receives and handles them as part of
9659 its normal operation. */
9660 signal_stop
[GDB_SIGNAL_LWP
] = 0;
9661 signal_print
[GDB_SIGNAL_LWP
] = 0;
9662 signal_stop
[GDB_SIGNAL_WAITING
] = 0;
9663 signal_print
[GDB_SIGNAL_WAITING
] = 0;
9664 signal_stop
[GDB_SIGNAL_CANCEL
] = 0;
9665 signal_print
[GDB_SIGNAL_CANCEL
] = 0;
9666 signal_stop
[GDB_SIGNAL_LIBRT
] = 0;
9667 signal_print
[GDB_SIGNAL_LIBRT
] = 0;
9669 /* Update cached state. */
9670 signal_cache_update (-1);
9672 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support
,
9673 &stop_on_solib_events
, _("\
9674 Set stopping for shared library events."), _("\
9675 Show stopping for shared library events."), _("\
9676 If nonzero, gdb will give control to the user when the dynamic linker\n\
9677 notifies gdb of shared library events. The most common event of interest\n\
9678 to the user would be loading/unloading of a new library."),
9679 set_stop_on_solib_events
,
9680 show_stop_on_solib_events
,
9681 &setlist
, &showlist
);
9683 add_setshow_enum_cmd ("follow-fork-mode", class_run
,
9684 follow_fork_mode_kind_names
,
9685 &follow_fork_mode_string
, _("\
9686 Set debugger response to a program call of fork or vfork."), _("\
9687 Show debugger response to a program call of fork or vfork."), _("\
9688 A fork or vfork creates a new process. follow-fork-mode can be:\n\
9689 parent - the original process is debugged after a fork\n\
9690 child - the new process is debugged after a fork\n\
9691 The unfollowed process will continue to run.\n\
9692 By default, the debugger will follow the parent process."),
9694 show_follow_fork_mode_string
,
9695 &setlist
, &showlist
);
9697 add_setshow_enum_cmd ("follow-exec-mode", class_run
,
9698 follow_exec_mode_names
,
9699 &follow_exec_mode_string
, _("\
9700 Set debugger response to a program call of exec."), _("\
9701 Show debugger response to a program call of exec."), _("\
9702 An exec call replaces the program image of a process.\n\
9704 follow-exec-mode can be:\n\
9706 new - the debugger creates a new inferior and rebinds the process\n\
9707 to this new inferior. The program the process was running before\n\
9708 the exec call can be restarted afterwards by restarting the original\n\
9711 same - the debugger keeps the process bound to the same inferior.\n\
9712 The new executable image replaces the previous executable loaded in\n\
9713 the inferior. Restarting the inferior after the exec call restarts\n\
9714 the executable the process was running after the exec call.\n\
9716 By default, the debugger will use the same inferior."),
9718 show_follow_exec_mode_string
,
9719 &setlist
, &showlist
);
9721 add_setshow_enum_cmd ("scheduler-locking", class_run
,
9722 scheduler_enums
, &scheduler_mode
, _("\
9723 Set mode for locking scheduler during execution."), _("\
9724 Show mode for locking scheduler during execution."), _("\
9725 off == no locking (threads may preempt at any time)\n\
9726 on == full locking (no thread except the current thread may run)\n\
9727 This applies to both normal execution and replay mode.\n\
9728 step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
9729 In this mode, other threads may run during other commands.\n\
9730 This applies to both normal execution and replay mode.\n\
9731 replay == scheduler locked in replay mode and unlocked during normal execution."),
9732 set_schedlock_func
, /* traps on target vector */
9733 show_scheduler_mode
,
9734 &setlist
, &showlist
);
9736 add_setshow_boolean_cmd ("schedule-multiple", class_run
, &sched_multi
, _("\
9737 Set mode for resuming threads of all processes."), _("\
9738 Show mode for resuming threads of all processes."), _("\
9739 When on, execution commands (such as 'continue' or 'next') resume all\n\
9740 threads of all processes. When off (which is the default), execution\n\
9741 commands only resume the threads of the current process. The set of\n\
9742 threads that are resumed is further refined by the scheduler-locking\n\
9743 mode (see help set scheduler-locking)."),
9745 show_schedule_multiple
,
9746 &setlist
, &showlist
);
9748 add_setshow_boolean_cmd ("step-mode", class_run
, &step_stop_if_no_debug
, _("\
9749 Set mode of the step operation."), _("\
9750 Show mode of the step operation."), _("\
9751 When set, doing a step over a function without debug line information\n\
9752 will stop at the first instruction of that function. Otherwise, the\n\
9753 function is skipped and the step command stops at a different source line."),
9755 show_step_stop_if_no_debug
,
9756 &setlist
, &showlist
);
9758 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run
,
9759 &can_use_displaced_stepping
, _("\
9760 Set debugger's willingness to use displaced stepping."), _("\
9761 Show debugger's willingness to use displaced stepping."), _("\
9762 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
9763 supported by the target architecture. If off, gdb will not use displaced\n\
9764 stepping to step over breakpoints, even if such is supported by the target\n\
9765 architecture. If auto (which is the default), gdb will use displaced stepping\n\
9766 if the target architecture supports it and non-stop mode is active, but will not\n\
9767 use it in all-stop mode (see help set non-stop)."),
9769 show_can_use_displaced_stepping
,
9770 &setlist
, &showlist
);
9772 add_setshow_enum_cmd ("exec-direction", class_run
, exec_direction_names
,
9773 &exec_direction
, _("Set direction of execution.\n\
9774 Options are 'forward' or 'reverse'."),
9775 _("Show direction of execution (forward/reverse)."),
9776 _("Tells gdb whether to execute forward or backward."),
9777 set_exec_direction_func
, show_exec_direction_func
,
9778 &setlist
, &showlist
);
9780 /* Set/show detach-on-fork: user-settable mode. */
9782 add_setshow_boolean_cmd ("detach-on-fork", class_run
, &detach_fork
, _("\
9783 Set whether gdb will detach the child of a fork."), _("\
9784 Show whether gdb will detach the child of a fork."), _("\
9785 Tells gdb whether to detach the child of a fork."),
9786 NULL
, NULL
, &setlist
, &showlist
);
9788 /* Set/show disable address space randomization mode. */
9790 add_setshow_boolean_cmd ("disable-randomization", class_support
,
9791 &disable_randomization
, _("\
9792 Set disabling of debuggee's virtual address space randomization."), _("\
9793 Show disabling of debuggee's virtual address space randomization."), _("\
9794 When this mode is on (which is the default), randomization of the virtual\n\
9795 address space is disabled. Standalone programs run with the randomization\n\
9796 enabled by default on some platforms."),
9797 &set_disable_randomization
,
9798 &show_disable_randomization
,
9799 &setlist
, &showlist
);
9801 /* ptid initializations */
9802 inferior_ptid
= null_ptid
;
9803 target_last_wait_ptid
= minus_one_ptid
;
9805 gdb::observers::thread_ptid_changed
.attach (infrun_thread_ptid_changed
);
9806 gdb::observers::thread_stop_requested
.attach (infrun_thread_stop_requested
);
9807 gdb::observers::thread_exit
.attach (infrun_thread_thread_exit
);
9808 gdb::observers::inferior_exit
.attach (infrun_inferior_exit
);
9810 /* Explicitly create without lookup, since that tries to create a
9811 value with a void typed value, and when we get here, gdbarch
9812 isn't initialized yet. At this point, we're quite sure there
9813 isn't another convenience variable of the same name. */
9814 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs
, NULL
);
9816 add_setshow_boolean_cmd ("observer", no_class
,
9817 &observer_mode_1
, _("\
9818 Set whether gdb controls the inferior in observer mode."), _("\
9819 Show whether gdb controls the inferior in observer mode."), _("\
9820 In observer mode, GDB can get data from the inferior, but not\n\
9821 affect its execution. Registers and memory may not be changed,\n\
9822 breakpoints may not be set, and the program cannot be interrupted\n\
9830 selftests::register_test ("infrun_thread_ptid_changed",
9831 selftests::infrun_thread_ptid_changed
);