gdb: get rid of get_displaced_stepping_state
gdb/infrun.c
/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "infrun.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "target.h"
#include "target-connection.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observable.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
#include "gdbsupport/event-loop.h"
#include "thread-fsm.h"
#include "gdbsupport/enum-flags.h"
#include "progspace-and-thread.h"
#include "gdbsupport/gdb_optional.h"
#include "arch-utils.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/forward-scope-exit.h"
#include "gdbsupport/gdb_select.h"
#include <unordered_map>
#include "async-event.h"
#include "gdbsupport/selftest.h"
#include "scoped-mock-context.h"
#include "test-target.h"
#include "gdbsupport/common-debug.h"

/* Prototypes for local functions */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static void follow_inferior_reset_breakpoints (void);

static bool currently_stepping (struct thread_info *tp);

static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static bool maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);

static void resume (gdb_signal sig);

static void wait_for_inferior (inferior *inf);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;

/* See infrun.h.  */

void
infrun_async (int enable)
{
  if (infrun_is_async != enable)
    {
      infrun_is_async = enable;

      infrun_debug_printf ("enable=%d", enable);

      if (enable)
        mark_async_event_handler (infrun_async_inferior_event_token);
      else
        clear_async_event_handler (infrun_async_inferior_event_token);
    }
}

/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  mark_async_event_handler (infrun_async_inferior_event_token);
}

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such functions.  */
bool step_stop_if_no_debug = false;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}

/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running
   in.  */

static ptid_t previous_inferior_ptid;

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  */

static bool detach_fork = true;

bool debug_displaced = false;
static void
show_debug_displaced (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
}

unsigned int debug_infrun = 0;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
                   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}

/* Support for disabling address space randomization.  */

bool disable_randomization = true;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  if (target_supports_disable_randomization ())
    fprintf_filtered (file,
                      _("Disabling randomization of debuggee's "
                        "virtual address space is %s.\n"),
                      value);
  else
    fputs_filtered (_("Disabling randomization of debuggee's "
                      "virtual address space is unsupported on\n"
                      "this platform.\n"), file);
}

static void
set_disable_randomization (const char *args, int from_tty,
                           struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
             "virtual address space is unsupported on\n"
             "this platform."));
}

/* User interface for non-stop mode.  */

bool non_stop = false;
static bool non_stop_1 = false;

static void
set_non_stop (const char *args, int from_tty,
              struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

static void
show_non_stop (struct ui_file *file, int from_tty,
               struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Controlling the inferior in non-stop mode is %s.\n"),
                    value);
}

/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

bool observer_mode = false;
static bool observer_mode_1 = false;

static void
set_observer_mode (const char *args, int from_tty,
                   struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  bool newval = (!may_insert_breakpoints
                 && !may_insert_tracepoints
                 && may_insert_fast_tracepoints
                 && !may_stop
                 && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}

/* Tables of how to react to signals; the user sets them.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];
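/* For each of the NSIGS signals, set (SET_SIGS) or clear (UNSET_SIGS)
   the corresponding entry in FLAGS wherever the signal's entry in SIGS
   is nonzero.  */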
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 0; \
  } while (0)

/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals (signal_program);
}

/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;

/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
                          int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
                    value);
}

/* True after stop if current stack frame should be printed.  */

static bool stop_print_frame;

/* This is a cached copy of the target/ptid/waitstatus of the last
   event returned by target_wait()/deprecated_target_wait_hook().
   This information is returned by get_last_target_status().  */
static process_stratum_target *target_last_proc_target;
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Debugger response to a program "
                      "call of fork or vfork is \"%s\".\n"),
                    value);
}


/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return 1;
    }

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
        {
          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep breakpoints list in sync.  */
              remove_breakpoints_inf (current_inferior ());
            }

          if (print_inferior_events)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = ptid_t (child_ptid.pid ());

              target_terminal::ours_for_output ();
              fprintf_filtered (gdb_stdlog,
                                _("[Detaching after %s from child %s]\n"),
                                has_vforked ? "vfork" : "fork",
                                target_pid_to_str (process_ptid).c_str ());
            }
        }
      else
        {
          struct inferior *parent_inf, *child_inf;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_ptid.pid ());

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);
          child_inf->gdbarch = parent_inf->gdbarch;
          copy_inferior_target_desc_info (child_inf, parent_inf);

          scoped_restore_current_pspace_and_thread restore_pspace_thread;

          set_current_inferior (child_inf);
          switch_to_no_thread ();
          child_inf->symfile_flags = SYMFILE_NO_READ;
          push_target (parent_inf->process_target ());
          thread_info *child_thr
            = add_thread_silent (child_inf->process_target (), child_ptid);

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              exec_on_vfork ();

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;

              /* Now that the inferiors and program spaces are all
                 wired up, we can switch to the child thread (which
                 switches inferior and program space too).  */
              switch_to_thread (child_thr);
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = new program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* solib_create_inferior_hook relies on the current
                 thread.  */
              switch_to_thread (child_thr);

              /* Let the shared library layer (e.g., solib-svr4) learn
                 about this new process, relocate the cloned exec, pull
                 in shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              solib_create_inferior_hook (0);
            }
        }

      if (has_vforked)
        {
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when
             the child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;
        }
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (print_inferior_events)
        {
          std::string parent_pid = target_pid_to_str (parent_ptid);
          std::string child_pid = target_pid_to_str (child_ptid);

          target_terminal::ours_for_output ();
          fprintf_filtered (gdb_stdlog,
                            _("[Attaching after %s %s to child %s]\n"),
                            parent_pid.c_str (),
                            has_vforked ? "vfork" : "fork",
                            child_pid.c_str ());
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      process_stratum_target *target = parent_inf->process_target ();

      {
        /* Hold a strong reference to the target while (maybe)
           detaching the parent.  Otherwise detaching could close the
           target.  */
        auto target_ref = target_ops_ref::new_reference (target);

        /* If we're vforking, we want to hold on to the parent until
           the child exits or execs.  At child exec or exit time we
           can remove the old breakpoints from the parent and detach
           or resume debugging it.  Otherwise, detach the parent now;
           we'll want to reuse its program/address spaces, but we
           can't set them to the child before removing breakpoints
           from the parent, otherwise, the breakpoints module could
           decide to remove breakpoints from the wrong process (since
           they'd be assigned to the same address space).  */

        if (has_vforked)
          {
            gdb_assert (child_inf->vfork_parent == NULL);
            gdb_assert (parent_inf->vfork_child == NULL);
            child_inf->vfork_parent = parent_inf;
            child_inf->pending_detach = 0;
            parent_inf->vfork_child = child_inf;
            parent_inf->pending_detach = detach_fork;
            parent_inf->waiting_for_vfork_done = 0;
          }
        else if (detach_fork)
          {
            if (print_inferior_events)
              {
                /* Ensure that we have a process ptid.  */
                ptid_t process_ptid = ptid_t (parent_ptid.pid ());

                target_terminal::ours_for_output ();
                fprintf_filtered (gdb_stdlog,
                                  _("[Detaching after fork from "
                                    "parent %s]\n"),
                                  target_pid_to_str (process_ptid).c_str ());
              }

            target_detach (parent_inf, 0);
            parent_inf = NULL;
          }

        /* Note that the detach above makes PARENT_INF dangling.  */

        /* Add the child thread to the appropriate lists, and switch
           to this new thread, before cloning the program space, and
           informing the solib layer about this new process.  */

        set_current_inferior (child_inf);
        push_target (target);
      }

      thread_info *child_thr = add_thread_silent (target, child_ptid);

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;

          exec_on_vfork ();
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = new program_space (child_inf->aspace);
          child_inf->removable = 1;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (e.g., solib-svr4) learn
             about this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          solib_create_inferior_hook (0);
        }

      switch_to_thread (child_thr);
    }

  return target_follow_fork (follow_child, detach_fork);
}

/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static bool
follow_fork ()
{
  bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  bool should_resume = true;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  int current_line = 0;
  symtab *current_symtab = NULL;
  struct frame_id step_frame_id = { 0 };
  struct thread_fsm *thread_fsm = NULL;

  if (!non_stop)
    {
      process_stratum_target *wait_target;
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_target, &wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
         do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
          && wait_status.kind != TARGET_WAITKIND_VFORKED)
        return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
         reported.  */
      if (wait_ptid != minus_one_ptid
          && (current_inferior ()->process_target () != wait_target
              || inferior_ptid != wait_ptid))
        {
          /* We did.  Switch back to WAIT_PTID thread, to tell the
             target to follow it (in either direction).  We'll
             afterwards refuse to resume, and inform the user what
             happened.  */
          thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
          switch_to_thread (wait_thread);
          should_resume = false;
        }
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
        ptid_t parent, child;

        /* If the user did a next/step, etc, over a fork call,
           preserve the stepping state in the fork child.  */
        if (follow_child && should_resume)
          {
            step_resume_breakpoint = clone_momentary_breakpoint
              (tp->control.step_resume_breakpoint);
            step_range_start = tp->control.step_range_start;
            step_range_end = tp->control.step_range_end;
            current_line = tp->current_line;
            current_symtab = tp->current_symtab;
            step_frame_id = tp->control.step_frame_id;
            exception_resume_breakpoint
              = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
            thread_fsm = tp->thread_fsm;

            /* For now, delete the parent's sr breakpoint, otherwise,
               parent/child sr breakpoints are considered duplicates,
               and the child version will not be installed.  Remove
               this when the breakpoints module becomes aware of
               inferiors and address spaces.  */
            delete_step_resume_breakpoint (tp);
            tp->control.step_range_start = 0;
            tp->control.step_range_end = 0;
            tp->control.step_frame_id = null_frame_id;
            delete_exception_resume_breakpoint (tp);
            tp->thread_fsm = NULL;
          }

        parent = inferior_ptid;
        child = tp->pending_follow.value.related_pid;

        process_stratum_target *parent_targ = tp->inf->process_target ();
        /* Set up inferior(s) as specified by the caller, and tell the
           target to do whatever is necessary to follow either parent
           or child.  */
        if (follow_fork_inferior (follow_child, detach_fork))
          {
            /* Target refused to follow, or there's some other reason
               we shouldn't resume.  */
            should_resume = 0;
          }
        else
          {
            /* This pending follow fork event is now handled, one way
               or another.  The previous selected thread may be gone
               from the lists by now, but if it is still around, need
               to clear the pending follow request.  */
            tp = find_thread_ptid (parent_targ, parent);
            if (tp)
              tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

            /* This makes sure we don't try to apply the "Switched
               over from WAIT_PID" logic above.  */
            nullify_last_target_wait_ptid ();

            /* If we followed the child, switch to it...  */
            if (follow_child)
              {
                thread_info *child_thr = find_thread_ptid (parent_targ, child);
                switch_to_thread (child_thr);

                /* ... and preserve the stepping state, in case the
                   user was stepping over the fork call.  */
                if (should_resume)
                  {
                    tp = inferior_thread ();
                    tp->control.step_resume_breakpoint
                      = step_resume_breakpoint;
                    tp->control.step_range_start = step_range_start;
                    tp->control.step_range_end = step_range_end;
                    tp->current_line = current_line;
                    tp->current_symtab = current_symtab;
                    tp->control.step_frame_id = step_frame_id;
                    tp->control.exception_resume_breakpoint
                      = exception_resume_breakpoint;
                    tp->thread_fsm = thread_fsm;
                  }
                else
                  {
                    /* If we get here, it was because we're trying to
                       resume from a fork catchpoint, but the user
                       has switched threads away from the thread that
                       forked.  In that case, the resume command
                       issued is most likely not applicable to the
                       child, so just warn, and refuse to resume.  */
                    warning (_("Not resuming: switched threads "
                               "before following fork child."));
                  }

                /* Reset breakpoints in the child as appropriate.  */
                follow_inferior_reset_breakpoints ();
              }
          }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
                      "Unexpected pending_follow.kind %d\n",
                      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}

static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->loc->enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->loc->enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}

/* The child has exited or execed: resume threads of the parent the
   user wanted to be executing.  */

static int
proceed_after_vfork_done (struct thread_info *thread,
                          void *arg)
{
  int pid = * (int *) arg;

  if (thread->ptid.pid () == pid
      && thread->state == THREAD_RUNNING
      && !thread->executing
      && !thread->stop_requested
      && thread->suspend.stop_signal == GDB_SIGNAL_0)
    {
      infrun_debug_printf ("resuming vfork parent thread %s",
                           target_pid_to_str (thread->ptid).c_str ());

      switch_to_thread (thread);
      clear_proceed_status (0);
      proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
    }

  return 0;
}

/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
         between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = NULL;
      inf->vfork_parent = NULL;

      /* If the user wanted to detach from the parent, now is the
         time.  */
      if (vfork_parent->pending_detach)
        {
          struct program_space *pspace;
          struct address_space *aspace;

          /* follow-fork child, detach-on-fork on.  */

          vfork_parent->pending_detach = 0;

          scoped_restore_current_pspace_and_thread restore_thread;

          /* We're letting loose of the parent.  */
          thread_info *tp = any_live_thread_of_inferior (vfork_parent);
          switch_to_thread (tp);

          /* We're about to detach from the parent, which implicitly
             removes breakpoints from its address space.  There's a
             catch here: we want to reuse the spaces for the child,
             but, parent/child are still sharing the pspace at this
             point, although the exec in reality makes the kernel give
             the child a fresh set of new pages.  The problem here is
             that the breakpoints module being unaware of this, would
             likely choose the child process to write to the parent
             address space.  Swapping the child temporarily away from
             the spaces has the desired effect.  Yes, this is "sort
             of" a hack.  */

          pspace = inf->pspace;
          aspace = inf->aspace;
          inf->aspace = NULL;
          inf->pspace = NULL;

          if (print_inferior_events)
            {
              std::string pidstr
                = target_pid_to_str (ptid_t (vfork_parent->pid));

              target_terminal::ours_for_output ();

              if (exec)
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exec]\n"), pidstr.c_str ());
                }
              else
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exit]\n"), pidstr.c_str ());
                }
            }

          target_detach (vfork_parent, 0);

          /* Put it back.  */
          inf->pspace = pspace;
          inf->aspace = aspace;
        }
      else if (exec)
        {
          /* We're staying attached to the parent, so, really give the
             child a new address space.  */
          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          inf->removable = 1;
          set_current_program_space (inf->pspace);

          resume_parent = vfork_parent->pid;
        }
      else
        {
          /* If this is a vfork child exiting, then the pspace and
             aspaces were shared with the parent.  Since we're
             reporting the process exit, we'll be mourning all that is
             found in the address space, and switching to null_ptid,
             preparing to start a new inferior.  But, since we don't
             want to clobber the parent's address/program spaces, we
             go ahead and create a new one for this exiting
             inferior.  */

          /* Switch to no-thread while running clone_program_space, so
             that clone_program_space doesn't want to read the
             selected frame of a dead process.  */
          scoped_restore_current_thread restore_thread;
          switch_to_no_thread ();

          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          set_current_program_space (inf->pspace);
          inf->removable = 1;
          inf->symfile_flags = SYMFILE_NO_READ;
          clone_program_space (inf->pspace, vfork_parent->pspace);

          resume_parent = vfork_parent->pid;
        }

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
        {
          /* If the user wanted the parent to be running, let it go
             free now.  */
          scoped_restore_current_thread restore_thread;

          infrun_debug_printf ("resuming vfork parent process %d",
                               resume_parent);

          iterate_over_threads (proceed_after_vfork_done, &resume_parent);
        }
    }
}

/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

static const char *follow_exec_mode_string = follow_exec_mode_same;
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}

/* EXEC_FILE_TARGET is assumed to be non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  struct inferior *inf = current_inferior ();
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  printf_unfiltered (_("%s is executing new program: %s\n"),
                     target_pid_to_str (process_ptid).c_str (),
                     exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, NULL);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == NULL)
    warning (_("Could not load symbols for executable %s.\n"
               "Do you need \"set sysroot\"?"),
             exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
         around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
         inferior's pid.  Having two inferiors with the same pid would confuse
         find_inferior_p(t)id.  Transfer the terminal state and info from the
         old to the new inferior.  */
      inf = add_inferior_with_spaces ();
      swap_terminal_info (inf, current_inferior ());
      exit_inferior_silent (current_inferior ());

      inf->pid = pid;
      target_follow_exec (inf, exec_file_target);

      inferior *org_inferior = current_inferior ();
      switch_to_inferior_no_thread (inf);
      push_target (org_inferior->process_target ());
      thread_info *thr = add_thread (inf->process_target (), ptid);
      switch_to_thread (thr);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
         E.g., a 64-bit process exec'ed a 32-bit process.  Clear the
         old description; we'll read a new one below.  No need to do
         this on "follow-exec-mode new", as the old inferior stays
         around (its description is later cleared/refetched on
         restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  gdb::observers::inferior_execd.notify (inf);

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}

/* The queue of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
struct thread_info *step_over_queue_head;

/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
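/* Illustrative example (not from the original source): because
   step_over_what is a flags type, a thread that must step past both a
   breakpoint and a non-continuable watchpoint would carry
   (STEP_OVER_BREAKPOINT | STEP_OVER_WATCHPOINT) in its step-over
   flags.  */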
/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace;
  CORE_ADDR address;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p;

  /* The thread's global number.  */
  int thread;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;

/* Record the address of the breakpoint/instruction we're currently
   stepping over.
   N.B. We record the aspace and address now, instead of say just the thread,
   because when we need the info later the thread may be running.  */

static void
set_step_over_info (const address_space *aspace, CORE_ADDR address,
                    int nonsteppable_watchpoint_p,
                    int thread)
{
  step_over_info.aspace = aspace;
  step_over_info.address = address;
  step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
  step_over_info.thread = thread;
}

/* Called when we're no longer stepping over a breakpoint / an
   instruction, so all breakpoints are free to be (re)inserted.  */

static void
clear_step_over_info (void)
{
  infrun_debug_printf ("clearing step over info");
  step_over_info.aspace = NULL;
  step_over_info.address = 0;
  step_over_info.nonsteppable_watchpoint_p = 0;
  step_over_info.thread = -1;
}

/* See infrun.h.  */

int
stepping_past_instruction_at (struct address_space *aspace,
                              CORE_ADDR address)
{
  return (step_over_info.aspace != NULL
          && breakpoint_address_match (aspace, address,
                                       step_over_info.aspace,
                                       step_over_info.address));
}

/* See infrun.h.  */

int
thread_is_stepping_over_breakpoint (int thread)
{
  return (step_over_info.thread != -1
          && thread == step_over_info.thread);
}

/* See infrun.h.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  return step_over_info.nonsteppable_watchpoint_p;
}

/* Returns true if step-over info is valid.  */

static bool
step_over_info_valid_p (void)
{
  return (step_over_info.aspace != NULL
          || stepping_past_nonsteppable_watchpoint ());
}

/* Displaced stepping.  */

/* In non-stop debugging mode, we must take special care to manage
   breakpoints properly; in particular, the traditional strategy for
   stepping a thread past a breakpoint it has hit is unsuitable.
   'Displaced stepping' is a tactic for stepping one thread past a
   breakpoint it has hit while ensuring that other threads running
   concurrently will hit the breakpoint as they should.

   The traditional way to step a thread T off a breakpoint in a
   multi-threaded program in all-stop mode is as follows:

   a0) Initially, all threads are stopped, and breakpoints are not
       inserted.
   a1) We single-step T, leaving breakpoints uninserted.
   a2) We insert breakpoints, and resume all threads.

   In non-stop debugging, however, this strategy is unsuitable: we
   don't want to have to stop all threads in the system in order to
   continue or step T past a breakpoint.  Instead, we use displaced
   stepping:

   n0) Initially, T is stopped, other threads are running, and
       breakpoints are inserted.
   n1) We copy the instruction "under" the breakpoint to a separate
       location, outside the main code stream, making any adjustments
       to the instruction, register, and memory state as directed by
       T's architecture.
   n2) We single-step T over the instruction at its new location.
   n3) We adjust the resulting register and memory state as directed
       by T's architecture.  This includes resetting T's PC to point
       back into the main instruction stream.
   n4) We resume T.

   This approach depends on the following gdbarch methods:

   - gdbarch_max_insn_length and gdbarch_displaced_step_location
     indicate where to copy the instruction, and how much space must
     be reserved there.  We use these in step n1.

   - gdbarch_displaced_step_copy_insn copies an instruction to a new
     address, and makes any necessary adjustments to the instruction,
     register contents, and memory.  We use this in step n1.

   - gdbarch_displaced_step_fixup adjusts registers and memory after
     we have successfully single-stepped the instruction, to yield the
     same effect the instruction would have had if we had executed it
     at its original address.  We use this in step n3.

   The gdbarch_displaced_step_copy_insn and
   gdbarch_displaced_step_fixup functions must be written so that
   copying an instruction with gdbarch_displaced_step_copy_insn,
   single-stepping across the copied instruction, and then applying
   gdbarch_displaced_insn_fixup should have the same effects on the
   thread's memory and registers as stepping the instruction in place
   would have.  Exactly which responsibilities fall to the copy and
   which fall to the fixup is up to the author of those functions.

   See the comments in gdbarch.sh for details.

   Note that displaced stepping and software single-step cannot
   currently be used in combination, although with some care I think
   they could be made to.  Software single-step works by placing
   breakpoints on all possible subsequent instructions; if the
   displaced instruction is a PC-relative jump, those breakpoints
   could fall in very strange places --- on pages that aren't
   executable, or at addresses that are not proper instruction
   boundaries.  (We do generally let other threads run while we wait
   to hit the software single-step breakpoint, and they might
   encounter such a corrupted instruction.)  One way to work around
   this would be to have gdbarch_displaced_step_copy_insn fully
   simulate the effect of PC-relative instructions (and return NULL)
   on architectures that use software single-stepping.

   In non-stop mode, we can have independent and simultaneous step
   requests, so more than one thread may need to simultaneously step
   over a breakpoint.  The current implementation assumes there is
   only one scratch space per process.  In this case, we have to
   serialize access to the scratch space.  If thread A wants to step
   over a breakpoint, but we are currently waiting for some other
   thread to complete a displaced step, we leave thread A stopped and
   place it in the displaced_step_request_queue.  Whenever a displaced
   step finishes, we pick the next thread in the queue and start a new
   displaced step operation on it.  See displaced_step_prepare and
   displaced_step_fixup for details.  */

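/* Illustrative sketch, not the actual implementation: assuming a
   single scratch buffer per process and eliding error handling, one
   displaced step of thread T roughly chains the gdbarch hooks named
   above like this:

     to = gdbarch_displaced_step_location (gdbarch);
     len = gdbarch_max_insn_length (gdbarch);
     <save LEN bytes of memory at TO>
     closure = gdbarch_displaced_step_copy_insn (gdbarch, pc, to, regcache);   [n1]
     <single-step T at TO>                                                     [n2]
     gdbarch_displaced_step_fixup (gdbarch, closure, pc, to, regcache);        [n3]
     <restore the saved bytes at TO, then resume T>                            [n4]

   The real sequencing lives in displaced_step_prepare and
   displaced_step_fixup, mentioned above.  */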
cfba9872
SM
1462/* Default destructor for displaced_step_closure. */
1463
1464displaced_step_closure::~displaced_step_closure () = default;
1465
372316f1
PA
1466/* Returns true if any inferior has a thread doing a displaced
1467 step. */
1468
39a36629
SM
1469static bool
1470displaced_step_in_progress_any_inferior ()
372316f1 1471{
d20172fc 1472 for (inferior *i : all_inferiors ())
39a36629 1473 {
d20172fc 1474 if (i->displaced_step_state.step_thread != nullptr)
39a36629
SM
1475 return true;
1476 }
372316f1 1477
39a36629 1478 return false;
372316f1
PA
1479}
1480
a46d1843 1481/* Return true if THREAD is doing a displaced step. */
c0987663 1482
c4464ade 1483static bool
00431a78 1484displaced_step_in_progress_thread (thread_info *thread)
c0987663 1485{
00431a78 1486 gdb_assert (thread != NULL);
c0987663 1487
f5f01699 1488 return thread->inf->displaced_step_state.step_thread == thread;
c0987663
YQ
1489}
1490
a46d1843 1491/* Return true if INF has a thread doing a displaced step. */
8f572e5c 1492
c4464ade 1493static bool
00431a78 1494displaced_step_in_progress (inferior *inf)
8f572e5c 1495{
f5f01699 1496 return inf->displaced_step_state.step_thread != nullptr;
fc1cf338
PA
1497}
1498
a42244db
YQ
1499/* If the inferior is displaced stepping, and ADDR equals the starting address
1500 of the copy area, return the corresponding displaced_step_closure. Otherwise,
1501 return NULL. */
1502
1503struct displaced_step_closure*
1504get_displaced_step_closure_by_addr (CORE_ADDR addr)
1505{
f5f01699
SM
1506 displaced_step_inferior_state &displaced
1507 = current_inferior ()->displaced_step_state;
a42244db
YQ
1508
1509 /* Is a displaced step in progress, with ADDR at the start of the copy area? */
f5f01699
SM
1510 if (displaced.step_thread != nullptr
1511 && displaced.step_copy == addr)
1512 return displaced.step_closure.get ();
a42244db
YQ
1513
1514 return NULL;
1515}
1516
fc1cf338
PA
1517static void
1518infrun_inferior_exit (struct inferior *inf)
1519{
d20172fc 1520 inf->displaced_step_state.reset ();
fc1cf338 1521}
237fc4c9 1522
3b7a962d
SM
1523static void
1524infrun_inferior_execd (inferior *inf)
1525{
1526 /* If a thread was doing a displaced step in this inferior at the moment of
1527 the exec, it no longer exists. Even if the exec'ing thread was the one
1528 doing a displaced step, we don't want to do any fixup or restore displaced
1529 stepping buffer bytes. */
1530 inf->displaced_step_state.reset ();
1531
1532 /* Since an in-line step is done with everything else stopped, if there was
1533 one in progress at the time of the exec, it must have been the exec'ing
1534 thread. */
1535 clear_step_over_info ();
1536}
1537
fff08868
HZ
1538/* If ON, and the architecture supports it, GDB will use displaced
1539 stepping to step over breakpoints. If OFF, or if the architecture
1540 doesn't support it, GDB will instead use the traditional
1541 hold-and-step approach. If AUTO (which is the default), GDB will
1542 decide which technique to use to step over breakpoints depending on
9822cb57 1543 whether the target works in a non-stop way (see use_displaced_stepping). */
fff08868 1544
72d0e2c5 1545static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
fff08868 1546
237fc4c9
PA
1547static void
1548show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1549 struct cmd_list_element *c,
1550 const char *value)
1551{
72d0e2c5 1552 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
3e43a32a
MS
1553 fprintf_filtered (file,
1554 _("Debugger's willingness to use displaced stepping "
1555 "to step over breakpoints is %s (currently %s).\n"),
fbea99ea 1556 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1557 else
3e43a32a
MS
1558 fprintf_filtered (file,
1559 _("Debugger's willingness to use displaced stepping "
1560 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1561}
1562
9822cb57
SM
1563/* Return true if the gdbarch implements the required methods to use
1564 displaced stepping. */
1565
1566static bool
1567gdbarch_supports_displaced_stepping (gdbarch *arch)
1568{
1569 /* Only check for the presence of step_copy_insn. Other required methods
1570 are checked by the gdbarch validation. */
1571 return gdbarch_displaced_step_copy_insn_p (arch);
1572}
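/* An architecture opts in by registering the hooks checked above from
   its *-tdep.c file.  A hedged sketch; arch_copy_insn, arch_fixup and
   arch_scratch_location are hypothetical names standing in for a
   port's own implementations:

     set_gdbarch_max_insn_length (gdbarch, 16);
     set_gdbarch_displaced_step_copy_insn (gdbarch, arch_copy_insn);
     set_gdbarch_displaced_step_fixup (gdbarch, arch_fixup);
     set_gdbarch_displaced_step_location (gdbarch, arch_scratch_location);  */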
1573
fff08868 1574/* Return true if displaced stepping can/should be used to step
3fc8eb30 1575 over breakpoints of thread TP. */
fff08868 1576
9822cb57
SM
1577static bool
1578use_displaced_stepping (thread_info *tp)
237fc4c9 1579{
9822cb57
SM
1580 /* If the user disabled it explicitly, don't use displaced stepping. */
1581 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1582 return false;
1583
1584 /* If "auto", only use displaced stepping if the target operates in a non-stop
1585 way. */
1586 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1587 && !target_is_non_stop_p ())
1588 return false;
1589
1590 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1591
1592 /* If the architecture doesn't implement displaced stepping, don't use
1593 it. */
1594 if (!gdbarch_supports_displaced_stepping (gdbarch))
1595 return false;
1596
1597 /* If recording, don't use displaced stepping. */
1598 if (find_record_target () != nullptr)
1599 return false;
1600
9822cb57
SM
1601 /* If displaced stepping failed before for this inferior, don't bother trying
1602 again. */
f5f01699 1603 if (tp->inf->displaced_step_state.failed_before)
9822cb57
SM
1604 return false;
1605
1606 return true;
237fc4c9
PA
1607}
1608
d8d83535
SM
1609/* Simple function wrapper around displaced_step_inferior_state::reset. */
1610
237fc4c9 1611static void
d8d83535 1612displaced_step_reset (displaced_step_inferior_state *displaced)
237fc4c9 1613{
d8d83535 1614 displaced->reset ();
237fc4c9
PA
1615}
1616
d8d83535
SM
1617/* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1618 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1619
1620using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
237fc4c9 1621
136821d9
SM
1622/* See infrun.h. */
1623
1624std::string
1625displaced_step_dump_bytes (const gdb_byte *buf, size_t len)
237fc4c9 1626{
136821d9 1627 std::string ret;
237fc4c9 1628
136821d9
SM
1629 for (size_t i = 0; i < len; i++)
1630 {
1631 if (i == 0)
1632 ret += string_printf ("%02x", buf[i]);
1633 else
1634 ret += string_printf (" %02x", buf[i]);
1635 }
1636
1637 return ret;
237fc4c9
PA
1638}
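/* For example (illustrative):

     const gdb_byte insn[] = { 0x55, 0x48, 0x89, 0xe5 };
     std::string s = displaced_step_dump_bytes (insn, sizeof (insn));
     ... s is now "55 48 89 e5" ...  */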
1639
1640/* Prepare to single-step, using displaced stepping.
1641
1642 Note that we cannot use displaced stepping when we have a signal to
1643 deliver. If we have a signal to deliver and an instruction to step
1644 over, then after the step, there will be no indication from the
1645 target whether the thread entered a signal handler or ignored the
1646 signal and stepped over the instruction successfully --- both cases
1647 result in a simple SIGTRAP. In the first case we mustn't do a
1648 fixup, and in the second case we must --- but we can't tell which.
1649 Comments in the code for 'random signals' in handle_inferior_event
1650 explain how we handle this case instead.
1651
1652 Returns 1 if preparing was successful -- this thread is going to be
7f03bd92
PA
1653 stepped now; 0 if displaced stepping this thread got queued; or -1
1654 if this instruction can't be displaced stepped. */
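/* In outline, a caller reacts to the three results like this (the
   real handling is in resume_1 further below):

     int prepared = displaced_step_prepare (tp);
     if (prepared > 0)
       ... resume TP; displaced_step_fixup runs once it stops ...
     else if (prepared == 0)
       ... TP was queued; another thread owns the scratch pad ...
     else
       ... fall back to an in-line step over the breakpoint ...  */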
1655
237fc4c9 1656static int
00431a78 1657displaced_step_prepare_throw (thread_info *tp)
237fc4c9 1658{
00431a78 1659 regcache *regcache = get_thread_regcache (tp);
ac7936df 1660 struct gdbarch *gdbarch = regcache->arch ();
8b86c959 1661 const address_space *aspace = regcache->aspace ();
237fc4c9
PA
1662 CORE_ADDR original, copy;
1663 ULONGEST len;
9e529e1d 1664 int status;
237fc4c9
PA
1665
1666 /* We should never reach this function if the architecture does not
1667 support displaced stepping. */
9822cb57 1668 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
237fc4c9 1669
c2829269
PA
1670 /* Nor if the thread isn't meant to step over a breakpoint. */
1671 gdb_assert (tp->control.trap_expected);
1672
c1e36e3e
PA
1673 /* Disable range stepping while executing in the scratch pad. We
1674 want a single-step even if executing the displaced instruction in
1675 the scratch buffer lands within the stepping range (e.g., a
1676 jump/branch). */
1677 tp->control.may_range_step = 0;
1678
fc1cf338
PA
1679 /* We have to displaced step one thread at a time, as we only have
1680 access to a single scratch space per inferior. */
237fc4c9 1681
f5f01699 1682 displaced_step_inferior_state *displaced = &tp->inf->displaced_step_state;
fc1cf338 1683
00431a78 1684 if (displaced->step_thread != nullptr)
237fc4c9
PA
1685 {
1686 /* Already waiting for a displaced step to finish. Defer this
1687 request and place it in the queue. */
237fc4c9 1688
136821d9
SM
1689 displaced_debug_printf ("deferring step of %s",
1690 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1691
c2829269 1692 thread_step_over_chain_enqueue (tp);
237fc4c9
PA
1693 return 0;
1694 }
1695 else
136821d9 1696 displaced_debug_printf ("stepping %s now",
a068643d 1697 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1698
d8d83535 1699 displaced_step_reset (displaced);
237fc4c9 1700
00431a78
PA
1701 scoped_restore_current_thread restore_thread;
1702
1703 switch_to_thread (tp);
ad53cd71 1704
515630c5 1705 original = regcache_read_pc (regcache);
237fc4c9
PA
1706
1707 copy = gdbarch_displaced_step_location (gdbarch);
1708 len = gdbarch_max_insn_length (gdbarch);
1709
d35ae833
PA
1710 if (breakpoint_in_range_p (aspace, copy, len))
1711 {
1712 /* There's a breakpoint set in the scratch pad location range
1713 (which is usually around the entry point). We'd either
1714 install it before resuming, which would overwrite/corrupt the
1715 scratch pad, or if it was already inserted, this displaced
1716 step would overwrite it. The latter is OK in the sense that
1717 we already assume that no thread is going to execute the code
1718 in the scratch pad range (after initial startup) anyway, but
1719 the former is unacceptable. Simply punt and fall back to
1720 stepping over this breakpoint in-line. */
136821d9
SM
1721 displaced_debug_printf ("breakpoint set in scratch pad. "
1722 "Stepping over breakpoint in-line instead.");
d35ae833 1723
d35ae833
PA
1724 return -1;
1725 }
1726
237fc4c9 1727 /* Save the original contents of the copy area. */
d20172fc
SM
1728 displaced->step_saved_copy.resize (len);
1729 status = target_read_memory (copy, displaced->step_saved_copy.data (), len);
9e529e1d
JK
1730 if (status != 0)
1731 throw_error (MEMORY_ERROR,
1732 _("Error accessing memory address %s (%s) for "
1733 "displaced-stepping scratch space."),
1734 paddress (gdbarch, copy), safe_strerror (status));
136821d9
SM
1735
1736 displaced_debug_printf ("saved %s: %s",
1737 paddress (gdbarch, copy),
1738 displaced_step_dump_bytes
1739 (displaced->step_saved_copy.data (), len).c_str ());
237fc4c9 1740
e8217e61
SM
1741 displaced->step_closure
1742 = gdbarch_displaced_step_copy_insn (gdbarch, original, copy, regcache);
1743 if (displaced->step_closure == NULL)
7f03bd92
PA
1744 {
1745 /* The architecture doesn't know how or want to displaced step
1746 this instruction or instruction sequence. Fall back to
1747 stepping over the breakpoint in-line. */
7f03bd92
PA
1748 return -1;
1749 }
237fc4c9 1750
9f5a595d
UW
1751 /* Save the information we need to fix things up if the step
1752 succeeds. */
00431a78 1753 displaced->step_thread = tp;
fc1cf338 1754 displaced->step_gdbarch = gdbarch;
fc1cf338
PA
1755 displaced->step_original = original;
1756 displaced->step_copy = copy;
9f5a595d 1757
9799571e 1758 {
d8d83535 1759 displaced_step_reset_cleanup cleanup (displaced);
237fc4c9 1760
9799571e
TT
1761 /* Resume execution at the copy. */
1762 regcache_write_pc (regcache, copy);
237fc4c9 1763
9799571e
TT
1764 cleanup.release ();
1765 }
ad53cd71 1766
136821d9 1767 displaced_debug_printf ("displaced pc to %s", paddress (gdbarch, copy));
237fc4c9 1768
237fc4c9
PA
1769 return 1;
1770}
1771
3fc8eb30
PA
1772/* Wrapper for displaced_step_prepare_throw that disables further
1773 attempts at displaced stepping if we get a memory error. */
1774
1775static int
00431a78 1776displaced_step_prepare (thread_info *thread)
3fc8eb30
PA
1777{
1778 int prepared = -1;
1779
a70b8144 1780 try
3fc8eb30 1781 {
00431a78 1782 prepared = displaced_step_prepare_throw (thread);
3fc8eb30 1783 }
230d2906 1784 catch (const gdb_exception_error &ex)
3fc8eb30 1785 {
16b41842
PA
1786 if (ex.error != MEMORY_ERROR
1787 && ex.error != NOT_SUPPORTED_ERROR)
eedc3f4f 1788 throw;
3fc8eb30 1789
1eb8556f
SM
1790 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1791 ex.what ());
3fc8eb30
PA
1792
1793 /* Be verbose if "set displaced-stepping" is "on", silent if
1794 "auto". */
1795 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1796 {
fd7dcb94 1797 warning (_("disabling displaced stepping: %s"),
3d6e9d23 1798 ex.what ());
3fc8eb30
PA
1799 }
1800
1801 /* Disable further displaced stepping attempts. */
f5f01699 1802 thread->inf->displaced_step_state.failed_before = 1;
3fc8eb30 1803 }
3fc8eb30
PA
1804
1805 return prepared;
1806}
1807
237fc4c9 1808static void
3e43a32a
MS
1809write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1810 const gdb_byte *myaddr, int len)
237fc4c9 1811{
2989a365 1812 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
abbb1732 1813
237fc4c9
PA
1814 inferior_ptid = ptid;
1815 write_memory (memaddr, myaddr, len);
237fc4c9
PA
1816}
1817
e2d96639
YQ
1818/* Restore the contents of the copy area for thread PTID. */
1819
1820static void
1821displaced_step_restore (struct displaced_step_inferior_state *displaced,
1822 ptid_t ptid)
1823{
1824 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1825
1826 write_memory_ptid (ptid, displaced->step_copy,
d20172fc 1827 displaced->step_saved_copy.data (), len);
136821d9
SM
1828
1829 displaced_debug_printf ("restored %s %s",
1830 target_pid_to_str (ptid).c_str (),
1831 paddress (displaced->step_gdbarch,
1832 displaced->step_copy));
e2d96639
YQ
1833}
1834
372316f1
PA
1835/* If we displaced stepped an instruction successfully, adjust
1836 registers and memory to yield the same effect the instruction would
1837 have had if we had executed it at its original address, and return
1838 1. If the instruction didn't complete, relocate the PC and return
1839 -1. If the thread wasn't displaced stepping, return 0. */
1840
1841static int
00431a78 1842displaced_step_fixup (thread_info *event_thread, enum gdb_signal signal)
237fc4c9 1843{
f5f01699
SM
1844 displaced_step_inferior_state *displaced
1845 = &event_thread->inf->displaced_step_state;
372316f1 1846 int ret;
fc1cf338 1847
00431a78
PA
1848 /* Was this event for the thread we displaced? */
1849 if (displaced->step_thread != event_thread)
372316f1 1850 return 0;
237fc4c9 1851
cb71640d
PA
1852 /* Fixup may need to read memory/registers. Switch to the thread
1853 that we're fixing up. Also, target_stopped_by_watchpoint checks
d43b7a2d
TBA
1854 the current thread, and displaced_step_restore performs ptid-dependent
1855 memory accesses using current_inferior() and current_top_target(). */
00431a78 1856 switch_to_thread (event_thread);
cb71640d 1857
d43b7a2d
TBA
1858 displaced_step_reset_cleanup cleanup (displaced);
1859
1860 displaced_step_restore (displaced, displaced->step_thread->ptid);
1861
237fc4c9 1862 /* Did the instruction complete successfully? */
cb71640d
PA
1863 if (signal == GDB_SIGNAL_TRAP
1864 && !(target_stopped_by_watchpoint ()
1865 && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
9aed480c 1866 || target_have_steppable_watchpoint ())))
237fc4c9
PA
1867 {
1868 /* Fix up the resulting state. */
fc1cf338 1869 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
dda83cd7
SM
1870 displaced->step_closure.get (),
1871 displaced->step_original,
1872 displaced->step_copy,
1873 get_thread_regcache (displaced->step_thread));
372316f1 1874 ret = 1;
237fc4c9
PA
1875 }
1876 else
1877 {
1878 /* Since the instruction didn't complete, all we can do is
dda83cd7 1879 relocate the PC. */
00431a78 1880 struct regcache *regcache = get_thread_regcache (event_thread);
515630c5 1881 CORE_ADDR pc = regcache_read_pc (regcache);
abbb1732 1882
fc1cf338 1883 pc = displaced->step_original + (pc - displaced->step_copy);
515630c5 1884 regcache_write_pc (regcache, pc);
372316f1 1885 ret = -1;
237fc4c9
PA
1886 }
1887
372316f1 1888 return ret;
c2829269 1889}
1c5cfe86 1890
4d9d9d04
PA
1891/* Data to be passed around while handling an event. This data is
1892 discarded between events. */
1893struct execution_control_state
1894{
5b6d1e4f 1895 process_stratum_target *target;
4d9d9d04
PA
1896 ptid_t ptid;
1897 /* The thread that got the event, if this was a thread event; NULL
1898 otherwise. */
1899 struct thread_info *event_thread;
1900
1901 struct target_waitstatus ws;
1902 int stop_func_filled_in;
1903 CORE_ADDR stop_func_start;
1904 CORE_ADDR stop_func_end;
1905 const char *stop_func_name;
1906 int wait_some_more;
1907
1908 /* True if the event thread hit the single-step breakpoint of
1909 another thread. Thus the event doesn't cause a stop; the thread
1910 needs to be single-stepped past the single-step breakpoint before
1911 we can switch back to the original stepping thread. */
1912 int hit_singlestep_breakpoint;
1913};
1914
1915/* Clear ECS and set it to point at TP. */
c2829269
PA
1916
1917static void
4d9d9d04
PA
1918reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1919{
1920 memset (ecs, 0, sizeof (*ecs));
1921 ecs->event_thread = tp;
1922 ecs->ptid = tp->ptid;
1923}
1924
1925static void keep_going_pass_signal (struct execution_control_state *ecs);
1926static void prepare_to_wait (struct execution_control_state *ecs);
c4464ade 1927static bool keep_going_stepped_thread (struct thread_info *tp);
8d297bbf 1928static step_over_what thread_still_needs_step_over (struct thread_info *tp);
4d9d9d04
PA
1929
1930/* Are there any pending step-over requests? If so, run all we can
1931 now and return true. Otherwise, return false. */
1932
c4464ade 1933static bool
c2829269
PA
1934start_step_over (void)
1935{
1936 struct thread_info *tp, *next;
1937
372316f1
PA
1938 /* Don't start a new step-over if we already have an in-line
1939 step-over operation ongoing. */
1940 if (step_over_info_valid_p ())
c4464ade 1941 return false;
372316f1 1942
c2829269 1943 for (tp = step_over_queue_head; tp != NULL; tp = next)
237fc4c9 1944 {
4d9d9d04
PA
1945 struct execution_control_state ecss;
1946 struct execution_control_state *ecs = &ecss;
8d297bbf 1947 step_over_what step_what;
372316f1 1948 int must_be_in_line;
c2829269 1949
c65d6b55
PA
1950 gdb_assert (!tp->stop_requested);
1951
c2829269 1952 next = thread_step_over_chain_next (tp);
237fc4c9 1953
c2829269
PA
1954 /* If this inferior already has a displaced step in progress,
1955 don't start a new one. */
00431a78 1956 if (displaced_step_in_progress (tp->inf))
c2829269
PA
1957 continue;
1958
372316f1
PA
1959 step_what = thread_still_needs_step_over (tp);
1960 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
1961 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 1962 && !use_displaced_stepping (tp)));
372316f1
PA
1963
1964 /* We currently stop all threads of all processes to step-over
1965 in-line. If we need to start a new in-line step-over, let
1966 any pending displaced steps finish first. */
1967 if (must_be_in_line && displaced_step_in_progress_any_inferior ())
c4464ade 1968 return false;
372316f1 1969
c2829269
PA
1970 thread_step_over_chain_remove (tp);
1971
1972 if (step_over_queue_head == NULL)
1eb8556f 1973 infrun_debug_printf ("step-over queue now empty");
c2829269 1974
372316f1
PA
1975 if (tp->control.trap_expected
1976 || tp->resumed
1977 || tp->executing)
ad53cd71 1978 {
4d9d9d04
PA
1979 internal_error (__FILE__, __LINE__,
1980 "[%s] has inconsistent state: "
372316f1 1981 "trap_expected=%d, resumed=%d, executing=%d\n",
a068643d 1982 target_pid_to_str (tp->ptid).c_str (),
4d9d9d04 1983 tp->control.trap_expected,
372316f1 1984 tp->resumed,
4d9d9d04 1985 tp->executing);
ad53cd71 1986 }
1c5cfe86 1987
1eb8556f
SM
1988 infrun_debug_printf ("resuming [%s] for step-over",
1989 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04
PA
1990
1991 /* keep_going_pass_signal skips the step-over if the breakpoint
1992 is no longer inserted. In all-stop, we want to keep looking
1993 for a thread that needs a step-over instead of resuming TP,
1994 because we wouldn't be able to resume anything else until the
1995 target stops again. In non-stop, the resume always resumes
1996 only TP, so it's OK to let the thread resume freely. */
fbea99ea 1997 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 1998 continue;
8550d3b3 1999
00431a78 2000 switch_to_thread (tp);
4d9d9d04
PA
2001 reset_ecs (ecs, tp);
2002 keep_going_pass_signal (ecs);
1c5cfe86 2003
4d9d9d04
PA
2004 if (!ecs->wait_some_more)
2005 error (_("Command aborted."));
1c5cfe86 2006
372316f1
PA
2007 gdb_assert (tp->resumed);
2008
2009 /* If we started a new in-line step-over, we're done. */
2010 if (step_over_info_valid_p ())
2011 {
2012 gdb_assert (tp->control.trap_expected);
c4464ade 2013 return true;
372316f1
PA
2014 }
2015
fbea99ea 2016 if (!target_is_non_stop_p ())
4d9d9d04
PA
2017 {
2018 /* On all-stop, shouldn't have resumed unless we needed a
2019 step over. */
2020 gdb_assert (tp->control.trap_expected
2021 || tp->step_after_step_resume_breakpoint);
2022
2023 /* With remote targets (at least), in all-stop, we can't
2024 issue any further remote commands until the program stops
2025 again. */
c4464ade 2026 return true;
1c5cfe86 2027 }
c2829269 2028
4d9d9d04
PA
2029 /* Either the thread no longer needed a step-over, or a new
2030 displaced stepping sequence started. Even in the latter
2031 case, continue looking. Maybe we can also start another
2032 displaced step on a thread of other process. */
237fc4c9 2033 }
4d9d9d04 2034
c4464ade 2035 return false;
237fc4c9
PA
2036}
2037
5231c1fd
PA
2038/* Update global variables holding ptids to hold NEW_PTID if they were
2039 holding OLD_PTID. */
2040static void
b161a60d
SM
2041infrun_thread_ptid_changed (process_stratum_target *target,
2042 ptid_t old_ptid, ptid_t new_ptid)
5231c1fd 2043{
b161a60d
SM
2044 if (inferior_ptid == old_ptid
2045 && current_inferior ()->process_target () == target)
5231c1fd 2046 inferior_ptid = new_ptid;
5231c1fd
PA
2047}
2048
237fc4c9 2049\f
c906108c 2050
53904c9e
AC
2051static const char schedlock_off[] = "off";
2052static const char schedlock_on[] = "on";
2053static const char schedlock_step[] = "step";
f2665db5 2054static const char schedlock_replay[] = "replay";
40478521 2055static const char *const scheduler_enums[] = {
ef346e04
AC
2056 schedlock_off,
2057 schedlock_on,
2058 schedlock_step,
f2665db5 2059 schedlock_replay,
ef346e04
AC
2060 NULL
2061};
f2665db5 2062static const char *scheduler_mode = schedlock_replay;
920d2a44
AC
2063static void
2064show_scheduler_mode (struct ui_file *file, int from_tty,
2065 struct cmd_list_element *c, const char *value)
2066{
3e43a32a
MS
2067 fprintf_filtered (file,
2068 _("Mode for locking scheduler "
2069 "during execution is \"%s\".\n"),
920d2a44
AC
2070 value);
2071}
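/* For example (illustrative session; the text is printed by
   show_scheduler_mode above):

     (gdb) set scheduler-locking step
     (gdb) show scheduler-locking
     Mode for locking scheduler during execution is "step".  */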
c906108c
SS
2072
2073static void
eb4c3f4a 2074set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
c906108c 2075{
8a3ecb79 2076 if (!target_can_lock_scheduler ())
eefe576e
AC
2077 {
2078 scheduler_mode = schedlock_off;
2079 error (_("Target '%s' cannot support this command."), target_shortname);
2080 }
c906108c
SS
2081}
2082
d4db2f36
PA
2083/* True if execution commands resume all threads of all processes by
2084 default; otherwise, resume only threads of the current inferior
2085 process. */
491144b5 2086bool sched_multi = false;
d4db2f36 2087
2facfe5c 2088/* Try to set up software single-stepping over the specified location.
c4464ade 2089 Return true if target_resume() should use hardware single step.
2facfe5c
DD
2090
2091 GDBARCH the current gdbarch.
2092 PC the location to step over. */
2093
c4464ade 2094static bool
2facfe5c
DD
2095maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2096{
c4464ade 2097 bool hw_step = true;
2facfe5c 2098
f02253f1 2099 if (execution_direction == EXEC_FORWARD
93f9a11f
YQ
2100 && gdbarch_software_single_step_p (gdbarch))
2101 hw_step = !insert_single_step_breakpoints (gdbarch);
2102
2facfe5c
DD
2103 return hw_step;
2104}
c906108c 2105
f3263aa4
PA
2106/* See infrun.h. */
2107
09cee04b
PA
2108ptid_t
2109user_visible_resume_ptid (int step)
2110{
f3263aa4 2111 ptid_t resume_ptid;
09cee04b 2112
09cee04b
PA
2113 if (non_stop)
2114 {
2115 /* With non-stop mode on, threads are always handled
2116 individually. */
2117 resume_ptid = inferior_ptid;
2118 }
2119 else if ((scheduler_mode == schedlock_on)
03d46957 2120 || (scheduler_mode == schedlock_step && step))
09cee04b 2121 {
f3263aa4
PA
2122 /* User-settable 'scheduler' mode requires solo thread
2123 resume. */
09cee04b
PA
2124 resume_ptid = inferior_ptid;
2125 }
f2665db5
MM
2126 else if ((scheduler_mode == schedlock_replay)
2127 && target_record_will_replay (minus_one_ptid, execution_direction))
2128 {
2129 /* User-settable 'scheduler' mode requires solo thread resume in replay
2130 mode. */
2131 resume_ptid = inferior_ptid;
2132 }
f3263aa4
PA
2133 else if (!sched_multi && target_supports_multi_process ())
2134 {
2135 /* Resume all threads of the current process (and none of other
2136 processes). */
e99b03dc 2137 resume_ptid = ptid_t (inferior_ptid.pid ());
f3263aa4
PA
2138 }
2139 else
2140 {
2141 /* Resume all threads of all processes. */
2142 resume_ptid = RESUME_ALL;
2143 }
09cee04b
PA
2144
2145 return resume_ptid;
2146}
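/* Summarizing the decision above (editorial note): non-stop mode,
   scheduler-locking "on", "step" (during stepping commands) and
   "replay" (while replaying) each resume only the current thread;
   otherwise GDB resumes every thread of the current process, or of
   all processes when "set schedule-multiple on" is in effect and the
   target supports multi-process.  */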
2147
5b6d1e4f
PA
2148/* See infrun.h. */
2149
2150process_stratum_target *
2151user_visible_resume_target (ptid_t resume_ptid)
2152{
2153 return (resume_ptid == minus_one_ptid && sched_multi
2154 ? NULL
2155 : current_inferior ()->process_target ());
2156}
2157
fbea99ea
PA
2158/* Return a ptid representing the set of threads that we will resume,
2159 in the perspective of the target, assuming run control handling
2160 does not require leaving some threads stopped (e.g., stepping past
2161 breakpoint). USER_STEP indicates whether we're about to start the
2162 target for a stepping command. */
2163
2164static ptid_t
2165internal_resume_ptid (int user_step)
2166{
2167 /* In non-stop, we always control threads individually. Note that
2168 the target may always work in non-stop mode even with "set
2169 non-stop off", in which case user_visible_resume_ptid could
2170 return a wildcard ptid. */
2171 if (target_is_non_stop_p ())
2172 return inferior_ptid;
2173 else
2174 return user_visible_resume_ptid (user_step);
2175}
2176
64ce06e4
PA
2177/* Wrapper for target_resume, that handles infrun-specific
2178 bookkeeping. */
2179
2180static void
c4464ade 2181do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
64ce06e4
PA
2182{
2183 struct thread_info *tp = inferior_thread ();
2184
c65d6b55
PA
2185 gdb_assert (!tp->stop_requested);
2186
64ce06e4 2187 /* Install inferior's terminal modes. */
223ffa71 2188 target_terminal::inferior ();
64ce06e4
PA
2189
2190 /* Avoid confusing the next resume, if the next stop/resume
2191 happens to apply to another thread. */
2192 tp->suspend.stop_signal = GDB_SIGNAL_0;
2193
8f572e5c
PA
2194 /* Advise target which signals may be handled silently.
2195
2196 If we have removed breakpoints because we are stepping over one
2197 in-line (in any thread), we need to receive all signals to avoid
2198 accidentally skipping a breakpoint during execution of a signal
2199 handler.
2200
2201 Likewise if we're displaced stepping; otherwise, a trap for a
2202 breakpoint in a signal handler might be confused with the
2203 displaced step finishing. We don't make displaced_step_fixup
2204 distinguish the two cases itself, because:
2205
2206 - a backtrace while stopped in the signal handler would show the
2207 scratch pad as frame older than the signal handler, instead of
2208 the real mainline code.
2209
2210 - when the thread is later resumed, the signal handler would
2211 return to the scratch pad area, which would no longer be
2212 valid. */
2213 if (step_over_info_valid_p ()
00431a78 2214 || displaced_step_in_progress (tp->inf))
adc6a863 2215 target_pass_signals ({});
64ce06e4 2216 else
adc6a863 2217 target_pass_signals (signal_pass);
64ce06e4
PA
2218
2219 target_resume (resume_ptid, step, sig);
85ad3aaf
PA
2220
2221 target_commit_resume ();
5b6d1e4f
PA
2222
2223 if (target_can_async_p ())
2224 target_async (1);
64ce06e4
PA
2225}
2226
d930703d 2227/* Resume the inferior. SIG is the signal to give the inferior
71d378ae
PA
2228 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2229 call 'resume', which handles exceptions. */
c906108c 2230
71d378ae
PA
2231static void
2232resume_1 (enum gdb_signal sig)
c906108c 2233{
515630c5 2234 struct regcache *regcache = get_current_regcache ();
ac7936df 2235 struct gdbarch *gdbarch = regcache->arch ();
4e1c45ea 2236 struct thread_info *tp = inferior_thread ();
8b86c959 2237 const address_space *aspace = regcache->aspace ();
b0f16a3e 2238 ptid_t resume_ptid;
856e7dd6
PA
2239 /* This represents the user's step vs continue request. When
2240 deciding whether "set scheduler-locking step" applies, it's the
2241 user's intention that counts. */
2242 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2243 /* This represents what we'll actually request the target to do.
2244 This can decay from a step to a continue, if e.g., we need to
2245 implement single-stepping with breakpoints (software
2246 single-step). */
c4464ade 2247 bool step;
c7e8a53c 2248
c65d6b55 2249 gdb_assert (!tp->stop_requested);
c2829269
PA
2250 gdb_assert (!thread_is_in_step_over_chain (tp));
2251
372316f1
PA
2252 if (tp->suspend.waitstatus_pending_p)
2253 {
1eb8556f
SM
2254 infrun_debug_printf
2255 ("thread %s has pending wait "
2256 "status %s (currently_stepping=%d).",
2257 target_pid_to_str (tp->ptid).c_str (),
2258 target_waitstatus_to_string (&tp->suspend.waitstatus).c_str (),
2259 currently_stepping (tp));
372316f1 2260
5b6d1e4f 2261 tp->inf->process_target ()->threads_executing = true;
719546c4 2262 tp->resumed = true;
372316f1
PA
2263
2264 /* FIXME: What should we do if we are supposed to resume this
2265 thread with a signal? Maybe we should maintain a queue of
2266 pending signals to deliver. */
2267 if (sig != GDB_SIGNAL_0)
2268 {
fd7dcb94 2269 warning (_("Couldn't deliver signal %s to %s."),
a068643d
TT
2270 gdb_signal_to_name (sig),
2271 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2272 }
2273
2274 tp->suspend.stop_signal = GDB_SIGNAL_0;
372316f1
PA
2275
2276 if (target_can_async_p ())
9516f85a
AB
2277 {
2278 target_async (1);
2279 /* Tell the event loop we have an event to process. */
2280 mark_async_event_handler (infrun_async_inferior_event_token);
2281 }
372316f1
PA
2282 return;
2283 }
2284
2285 tp->stepped_breakpoint = 0;
2286
6b403daa
PA
2287 /* Depends on stepped_breakpoint. */
2288 step = currently_stepping (tp);
2289
74609e71
YQ
2290 if (current_inferior ()->waiting_for_vfork_done)
2291 {
48f9886d
PA
2292 /* Don't try to single-step a vfork parent that is waiting for
2293 the child to get out of the shared memory region (by exec'ing
2294 or exiting). This is particularly important on software
2295 single-step archs, as the child process would trip on the
2296 software single step breakpoint inserted for the parent
2297 process. Since the parent will not actually execute any
2298 instruction until the child is out of the shared region (such
2299 are vfork's semantics), it is safe to simply continue it.
2300 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2301 the parent, and tell it to `keep_going', which automatically
2302 sets it stepping again. */
1eb8556f 2303 infrun_debug_printf ("resume : clear step");
c4464ade 2304 step = false;
74609e71
YQ
2305 }
2306
7ca9b62a
TBA
2307 CORE_ADDR pc = regcache_read_pc (regcache);
2308
1eb8556f
SM
2309 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2310 "current thread [%s] at %s",
2311 step, gdb_signal_to_symbol_string (sig),
2312 tp->control.trap_expected,
2313 target_pid_to_str (inferior_ptid).c_str (),
2314 paddress (gdbarch, pc));
c906108c 2315
c2c6d25f
JM
2316 /* Normally, by the time we reach `resume', the breakpoints are either
2317 removed or inserted, as appropriate. The exception is if we're sitting
2318 at a permanent breakpoint; we need to step over it, but permanent
2319 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2320 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2321 {
af48d08f
PA
2322 if (sig != GDB_SIGNAL_0)
2323 {
2324 /* We have a signal to pass to the inferior. The resume
2325 may, or may not take us to the signal handler. If this
2326 is a step, we'll need to stop in the signal handler, if
2327 there's one, (if the target supports stepping into
2328 handlers), or in the next mainline instruction, if
2329 there's no handler. If this is a continue, we need to be
2330 sure to run the handler with all breakpoints inserted.
2331 In all cases, set a breakpoint at the current address
2332 (where the handler returns to), and once that breakpoint
2333 is hit, resume skipping the permanent breakpoint. If
2334 that breakpoint isn't hit, then we've stepped into the
2335 signal handler (or hit some other event). We'll delete
2336 the step-resume breakpoint then. */
2337
1eb8556f
SM
2338 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2339 "deliver signal first");
af48d08f
PA
2340
2341 clear_step_over_info ();
2342 tp->control.trap_expected = 0;
2343
2344 if (tp->control.step_resume_breakpoint == NULL)
2345 {
2346 /* Set a "high-priority" step-resume, as we don't want
2347 user breakpoints at PC to trigger (again) when this
2348 hits. */
2349 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2350 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2351
2352 tp->step_after_step_resume_breakpoint = step;
2353 }
2354
2355 insert_breakpoints ();
2356 }
2357 else
2358 {
2359 /* There's no signal to pass, we can go ahead and skip the
2360 permanent breakpoint manually. */
1eb8556f 2361 infrun_debug_printf ("skipping permanent breakpoint");
af48d08f
PA
2362 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2363 /* Update pc to reflect the new address from which we will
2364 execute instructions. */
2365 pc = regcache_read_pc (regcache);
2366
2367 if (step)
2368 {
2369 /* We've already advanced the PC, so the stepping part
2370 is done. Now we need to arrange for a trap to be
2371 reported to handle_inferior_event. Set a breakpoint
2372 at the current PC, and run to it. Don't update
2373 prev_pc, because if we end in
44a1ee51
PA
2374 switch_back_to_stepped_thread, we want the "expected
2375 thread advanced also" branch to be taken. IOW, we
2376 don't want this thread to step further from PC
af48d08f 2377 (overstep). */
1ac806b8 2378 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2379 insert_single_step_breakpoint (gdbarch, aspace, pc);
2380 insert_breakpoints ();
2381
fbea99ea 2382 resume_ptid = internal_resume_ptid (user_step);
c4464ade 2383 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
719546c4 2384 tp->resumed = true;
af48d08f
PA
2385 return;
2386 }
2387 }
6d350bb5 2388 }
c2c6d25f 2389
c1e36e3e
PA
2390 /* If we have a breakpoint to step over, make sure to do a single
2391 step only. Same if we have software watchpoints. */
2392 if (tp->control.trap_expected || bpstat_should_step ())
2393 tp->control.may_range_step = 0;
2394
7da6a5b9
LM
2395 /* If displaced stepping is enabled, step over breakpoints by executing a
2396 copy of the instruction at a different address.
237fc4c9
PA
2397
2398 We can't use displaced stepping when we have a signal to deliver;
2399 the comments for displaced_step_prepare explain why. The
2400 comments in the handle_inferior event for dealing with 'random
74609e71
YQ
2401 signals' explain what we do instead.
2402
2403 We can't use displaced stepping while we are waiting for a vfork_done
2404 event either; displaced stepping would break the vfork child in the
2405 same way a software single-step breakpoint would. */
3fc8eb30
PA
2406 if (tp->control.trap_expected
2407 && use_displaced_stepping (tp)
cb71640d 2408 && !step_over_info_valid_p ()
a493e3e2 2409 && sig == GDB_SIGNAL_0
74609e71 2410 && !current_inferior ()->waiting_for_vfork_done)
237fc4c9 2411 {
00431a78 2412 int prepared = displaced_step_prepare (tp);
fc1cf338 2413
3fc8eb30 2414 if (prepared == 0)
d56b7306 2415 {
1eb8556f 2416 infrun_debug_printf ("Got placed in step-over queue");
4d9d9d04
PA
2417
2418 tp->control.trap_expected = 0;
d56b7306
VP
2419 return;
2420 }
3fc8eb30
PA
2421 else if (prepared < 0)
2422 {
2423 /* Fall back to stepping over the breakpoint in-line. */
2424
2425 if (target_is_non_stop_p ())
2426 stop_all_threads ();
2427
a01bda52 2428 set_step_over_info (regcache->aspace (),
21edc42f 2429 regcache_read_pc (regcache), 0, tp->global_num);
3fc8eb30
PA
2430
2431 step = maybe_software_singlestep (gdbarch, pc);
2432
2433 insert_breakpoints ();
2434 }
2435 else if (prepared > 0)
2436 {
3fc8eb30
PA
2437 /* Update pc to reflect the new address from which we will
2438 execute instructions due to displaced stepping. */
00431a78 2439 pc = regcache_read_pc (get_thread_regcache (tp));
ca7781d2 2440
40a53766 2441 step = gdbarch_displaced_step_hw_singlestep (gdbarch);
3fc8eb30 2442 }
237fc4c9
PA
2443 }
2444
2facfe5c 2445 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2446 else if (step)
2facfe5c 2447 step = maybe_software_singlestep (gdbarch, pc);
c906108c 2448
30852783
UW
2449 /* Currently, our software single-step implementation leads to different
2450 results than hardware single-stepping in one situation: when stepping
2451 into delivering a signal which has an associated signal handler,
2452 hardware single-step will stop at the first instruction of the handler,
2453 while software single-step will simply skip execution of the handler.
2454
2455 For now, this difference in behavior is accepted since there is no
2456 easy way to actually implement single-stepping into a signal handler
2457 without kernel support.
2458
2459 However, there is one scenario where this difference leads to follow-on
2460 problems: if we're stepping off a breakpoint by removing all breakpoints
2461 and then single-stepping. In this case, the software single-step
2462 behavior means that even if there is a *breakpoint* in the signal
2463 handler, GDB still would not stop.
2464
2465 Fortunately, we can at least fix this particular issue. We detect
2466 here the case where we are about to deliver a signal while software
2467 single-stepping with breakpoints removed. In this situation, we
2468 revert the decisions to remove all breakpoints and insert single-
2469 step breakpoints, and instead we install a step-resume breakpoint
2470 at the current address, deliver the signal without stepping, and
2471 once we arrive back at the step-resume breakpoint, actually step
2472 over the breakpoint we originally wanted to step over. */
34b7e8a6 2473 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2474 && sig != GDB_SIGNAL_0
2475 && step_over_info_valid_p ())
30852783
UW
2476 {
2477 /* If we have nested signals or a pending signal is delivered
7da6a5b9 2478 immediately after a handler returns, we might already have
30852783
UW
2479 a step-resume breakpoint set on the earlier handler. We cannot
2480 set another step-resume breakpoint; just continue on until the
2481 original breakpoint is hit. */
2482 if (tp->control.step_resume_breakpoint == NULL)
2483 {
2c03e5be 2484 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2485 tp->step_after_step_resume_breakpoint = 1;
2486 }
2487
34b7e8a6 2488 delete_single_step_breakpoints (tp);
30852783 2489
31e77af2 2490 clear_step_over_info ();
30852783 2491 tp->control.trap_expected = 0;
31e77af2
PA
2492
2493 insert_breakpoints ();
30852783
UW
2494 }
2495
b0f16a3e
SM
2496 /* If STEP is set, it's a request to use hardware stepping
2497 facilities. But in that case, we should never
2498 use a singlestep breakpoint. */
34b7e8a6 2499 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2500
fbea99ea 2501 /* Decide the set of threads to ask the target to resume. */
1946c4cc 2502 if (tp->control.trap_expected)
b0f16a3e
SM
2503 {
2504 /* We're allowing a thread to run past a breakpoint it has
1946c4cc
YQ
2505 hit, either by single-stepping the thread with the breakpoint
2506 removed, or by displaced stepping, with the breakpoint inserted.
2507 In the former case, we need to single-step only this thread,
2508 and keep others stopped, as they can miss this breakpoint if
2509 allowed to run. That's not really a problem for displaced
2510 stepping, but, we still keep other threads stopped, in case
2511 another thread is also stopped for a breakpoint waiting for
2512 its turn in the displaced stepping queue. */
b0f16a3e
SM
2513 resume_ptid = inferior_ptid;
2514 }
fbea99ea
PA
2515 else
2516 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2517
7f5ef605
PA
2518 if (execution_direction != EXEC_REVERSE
2519 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2520 {
372316f1
PA
2521 /* There are two cases where we currently need to step a
2522 breakpoint instruction when we have a signal to deliver:
2523
2524 - See handle_signal_stop where we handle random signals that
2525 could take us out of the stepping range. Normally, in
2526 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2527 signal handler with a breakpoint at PC, but there are cases
2528 where we should _always_ single-step, even if we have a
2529 step-resume breakpoint, like when a software watchpoint is
2530 set. Assuming single-stepping and delivering a signal at the
2531 same time would take us to the signal handler, then we could
2532 have removed the breakpoint at PC to step over it. However,
2533 some hardware step targets (like e.g., Mac OS) can't step
2534 into signal handlers, and for those, we need to leave the
2535 breakpoint at PC inserted, as otherwise if the handler
2536 recurses and executes PC again, it'll miss the breakpoint.
2537 So we leave the breakpoint inserted anyway, but we need to
2538 record that we tried to step a breakpoint instruction, so
372316f1
PA
2539 that adjust_pc_after_break doesn't end up confused.
2540
dda83cd7 2541 - In non-stop if we insert a breakpoint (e.g., a step-resume)
372316f1
PA
2542 in one thread after another thread that was stepping had been
2543 momentarily paused for a step-over. When we re-resume the
2544 stepping thread, it may be resumed from that address with a
2545 breakpoint that hasn't trapped yet. Seen with
2546 gdb.threads/non-stop-fair-events.exp, on targets that don't
2547 do displaced stepping. */
2548
1eb8556f
SM
2549 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2550 target_pid_to_str (tp->ptid).c_str ());
7f5ef605
PA
2551
2552 tp->stepped_breakpoint = 1;
2553
b0f16a3e
SM
2554 /* Most targets can step a breakpoint instruction, thus
2555 executing it normally. But if this one cannot, just
2556 continue and we will hit it anyway. */
7f5ef605 2557 if (gdbarch_cannot_step_breakpoint (gdbarch))
c4464ade 2558 step = false;
b0f16a3e 2559 }
ef5cf84e 2560
b0f16a3e 2561 if (debug_displaced
cb71640d 2562 && tp->control.trap_expected
3fc8eb30 2563 && use_displaced_stepping (tp)
cb71640d 2564 && !step_over_info_valid_p ())
b0f16a3e 2565 {
00431a78 2566 struct regcache *resume_regcache = get_thread_regcache (tp);
ac7936df 2567 struct gdbarch *resume_gdbarch = resume_regcache->arch ();
b0f16a3e
SM
2568 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2569 gdb_byte buf[4];
2570
b0f16a3e 2571 read_memory (actual_pc, buf, sizeof (buf));
136821d9
SM
2572 displaced_debug_printf ("run %s: %s",
2573 paddress (resume_gdbarch, actual_pc),
2574 displaced_step_dump_bytes
2575 (buf, sizeof (buf)).c_str ());
b0f16a3e 2576 }
237fc4c9 2577
b0f16a3e
SM
2578 if (tp->control.may_range_step)
2579 {
2580 /* If we're resuming a thread with the PC out of the step
2581 range, then we're doing some nested/finer run control
2582 operation, like stepping the thread out of the dynamic
2583 linker or the displaced stepping scratch pad. We
2584 shouldn't have allowed a range step then. */
2585 gdb_assert (pc_in_thread_step_range (pc, tp));
2586 }
c1e36e3e 2587
64ce06e4 2588 do_target_resume (resume_ptid, step, sig);
719546c4 2589 tp->resumed = true;
c906108c 2590}
71d378ae
PA
2591
2592/* Resume the inferior. SIG is the signal to give the inferior
2593 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2594 rolls back state on error. */
2595
aff4e175 2596static void
71d378ae
PA
2597resume (gdb_signal sig)
2598{
a70b8144 2599 try
71d378ae
PA
2600 {
2601 resume_1 (sig);
2602 }
230d2906 2603 catch (const gdb_exception &ex)
71d378ae
PA
2604 {
2605 /* If resuming is being aborted for any reason, delete any
2606 single-step breakpoint resume_1 may have created, to avoid
2607 confusing the following resumption, and to avoid leaving
2608 single-step breakpoints perturbing other threads, in case
2609 we're running in non-stop mode. */
2610 if (inferior_ptid != null_ptid)
2611 delete_single_step_breakpoints (inferior_thread ());
eedc3f4f 2612 throw;
71d378ae 2613 }
71d378ae
PA
2614}
2615
c906108c 2616\f
237fc4c9 2617/* Proceeding. */
c906108c 2618
4c2f2a79
PA
2619/* See infrun.h. */
2620
2621/* Counter that tracks the number of user-visible stops. This can be used
2622 to tell whether a command has proceeded the inferior past the
2623 current location. This allows e.g., inferior function calls in
2624 breakpoint commands to not interrupt the command list. When the
2625 call finishes successfully, the inferior is standing at the same
2626 breakpoint as if nothing happened (and so we don't call
2627 normal_stop). */
2628static ULONGEST current_stop_id;
2629
2630/* See infrun.h. */
2631
2632ULONGEST
2633get_stop_id (void)
2634{
2635 return current_stop_id;
2636}
2637
2638/* Called when we report a user visible stop. */
2639
2640static void
2641new_stop_id (void)
2642{
2643 current_stop_id++;
2644}
2645
c906108c
SS
2646/* Clear out all variables saying what to do when inferior is continued.
2647 First do this, then set the ones you want, then call `proceed'. */
2648
a7212384
UW
2649static void
2650clear_proceed_status_thread (struct thread_info *tp)
c906108c 2651{
1eb8556f 2652 infrun_debug_printf ("%s", target_pid_to_str (tp->ptid).c_str ());
d6b48e9c 2653
372316f1
PA
2654 /* If we're starting a new sequence, then the previous finished
2655 single-step is no longer relevant. */
2656 if (tp->suspend.waitstatus_pending_p)
2657 {
2658 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
2659 {
1eb8556f
SM
2660 infrun_debug_printf ("pending event of %s was a finished step. "
2661 "Discarding.",
2662 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2663
2664 tp->suspend.waitstatus_pending_p = 0;
2665 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
2666 }
1eb8556f 2667 else
372316f1 2668 {
1eb8556f
SM
2669 infrun_debug_printf
2670 ("thread %s has pending wait status %s (currently_stepping=%d).",
2671 target_pid_to_str (tp->ptid).c_str (),
2672 target_waitstatus_to_string (&tp->suspend.waitstatus).c_str (),
2673 currently_stepping (tp));
372316f1
PA
2674 }
2675 }
2676
70509625
PA
2677 /* If this signal should not be seen by program, give it zero.
2678 Used for debugging signals. */
2679 if (!signal_pass_state (tp->suspend.stop_signal))
2680 tp->suspend.stop_signal = GDB_SIGNAL_0;
2681
46e3ed7f 2682 delete tp->thread_fsm;
243a9253
PA
2683 tp->thread_fsm = NULL;
2684
16c381f0
JK
2685 tp->control.trap_expected = 0;
2686 tp->control.step_range_start = 0;
2687 tp->control.step_range_end = 0;
c1e36e3e 2688 tp->control.may_range_step = 0;
16c381f0
JK
2689 tp->control.step_frame_id = null_frame_id;
2690 tp->control.step_stack_frame_id = null_frame_id;
2691 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
885eeb5b 2692 tp->control.step_start_function = NULL;
a7212384 2693 tp->stop_requested = 0;
4e1c45ea 2694
16c381f0 2695 tp->control.stop_step = 0;
32400beb 2696
16c381f0 2697 tp->control.proceed_to_finish = 0;
414c69f7 2698
856e7dd6 2699 tp->control.stepping_command = 0;
17b2616c 2700
a7212384 2701 /* Discard any remaining commands or status from previous stop. */
16c381f0 2702 bpstat_clear (&tp->control.stop_bpstat);
a7212384 2703}
32400beb 2704
a7212384 2705void
70509625 2706clear_proceed_status (int step)
a7212384 2707{
f2665db5
MM
2708 /* With scheduler-locking replay, stop replaying other threads if we're
2709 not replaying the user-visible resume ptid.
2710
2711 This is a convenience feature to not require the user to explicitly
2712 stop replaying the other threads. We're assuming that the user's
2713 intent is to resume tracing the recorded process. */
2714 if (!non_stop && scheduler_mode == schedlock_replay
2715 && target_record_is_replaying (minus_one_ptid)
2716 && !target_record_will_replay (user_visible_resume_ptid (step),
2717 execution_direction))
2718 target_record_stop_replaying ();
2719
08036331 2720 if (!non_stop && inferior_ptid != null_ptid)
6c95b8df 2721 {
08036331 2722 ptid_t resume_ptid = user_visible_resume_ptid (step);
5b6d1e4f
PA
2723 process_stratum_target *resume_target
2724 = user_visible_resume_target (resume_ptid);
70509625
PA
2725
2726 /* In all-stop mode, delete the per-thread status of all threads
2727 we're about to resume, implicitly and explicitly. */
5b6d1e4f 2728 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
08036331 2729 clear_proceed_status_thread (tp);
6c95b8df
PA
2730 }
2731
d7e15655 2732 if (inferior_ptid != null_ptid)
a7212384
UW
2733 {
2734 struct inferior *inferior;
2735
2736 if (non_stop)
2737 {
6c95b8df
PA
2738 /* If in non-stop mode, only delete the per-thread status of
2739 the current thread. */
a7212384
UW
2740 clear_proceed_status_thread (inferior_thread ());
2741 }
6c95b8df 2742
d6b48e9c 2743 inferior = current_inferior ();
16c381f0 2744 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
2745 }
2746
76727919 2747 gdb::observers::about_to_proceed.notify ();
c906108c
SS
2748}
2749
99619bea
PA
2750/* Returns true if TP is still stopped at a breakpoint that needs
2751 stepping-over in order to make progress. If the breakpoint is gone
2752 meanwhile, we can skip the whole step-over dance. */
ea67f13b 2753
c4464ade 2754static bool
6c4cfb24 2755thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
2756{
2757 if (tp->stepping_over_breakpoint)
2758 {
00431a78 2759 struct regcache *regcache = get_thread_regcache (tp);
99619bea 2760
a01bda52 2761 if (breakpoint_here_p (regcache->aspace (),
af48d08f
PA
2762 regcache_read_pc (regcache))
2763 == ordinary_breakpoint_here)
c4464ade 2764 return true;
99619bea
PA
2765
2766 tp->stepping_over_breakpoint = 0;
2767 }
2768
c4464ade 2769 return false;
99619bea
PA
2770}
2771
6c4cfb24
PA
2772/* Check whether thread TP still needs to start a step-over in order
2773 to make progress when resumed. Returns an bitwise or of enum
2774 step_over_what bits, indicating what needs to be stepped over. */
2775
8d297bbf 2776static step_over_what
6c4cfb24
PA
2777thread_still_needs_step_over (struct thread_info *tp)
2778{
8d297bbf 2779 step_over_what what = 0;
6c4cfb24
PA
2780
2781 if (thread_still_needs_step_over_bp (tp))
2782 what |= STEP_OVER_BREAKPOINT;
2783
2784 if (tp->stepping_over_watchpoint
9aed480c 2785 && !target_have_steppable_watchpoint ())
6c4cfb24
PA
2786 what |= STEP_OVER_WATCHPOINT;
2787
2788 return what;
2789}
2790
483805cf
PA
2791/* Returns true if scheduler locking applies to thread TP; that is, if
2792 resuming TP should leave all other threads stopped. */
2793
c4464ade 2794static bool
856e7dd6 2795schedlock_applies (struct thread_info *tp)
483805cf
PA
2796{
2797 return (scheduler_mode == schedlock_on
2798 || (scheduler_mode == schedlock_step
f2665db5
MM
2799 && tp->control.stepping_command)
2800 || (scheduler_mode == schedlock_replay
2801 && target_record_will_replay (minus_one_ptid,
2802 execution_direction)));
483805cf
PA
2803}
2804
5b6d1e4f
PA
2805/* Calls target_commit_resume on all targets. */
2806
2807static void
2808commit_resume_all_targets ()
2809{
2810 scoped_restore_current_thread restore_thread;
2811
2812 /* Map between process_target and a representative inferior. This
2813 is to avoid committing a resume in the same target more than
2814 once. Resumptions must be idempotent, so this is an
2815 optimization. */
2816 std::unordered_map<process_stratum_target *, inferior *> conn_inf;
2817
2818 for (inferior *inf : all_non_exited_inferiors ())
2819 if (inf->has_execution ())
2820 conn_inf[inf->process_target ()] = inf;
2821
2822 for (const auto &ci : conn_inf)
2823 {
2824 inferior *inf = ci.second;
2825 switch_to_inferior_no_thread (inf);
2826 target_commit_resume ();
2827 }
2828}
2829
2f4fcf00
PA
2830/* Check that all the targets we're about to resume are in non-stop
2831 mode. Ideally, we'd only care whether all targets support
2832 target-async, but we're not there yet. E.g., stop_all_threads
2833 doesn't know how to handle all-stop targets. Also, the remote
2834 protocol in all-stop mode is synchronous, irrespective of
2835 target-async, which means that things like a breakpoint re-set
2836 triggered by one target would try to read memory from all targets
2837 and fail. */
2838
2839static void
2840check_multi_target_resumption (process_stratum_target *resume_target)
2841{
2842 if (!non_stop && resume_target == nullptr)
2843 {
2844 scoped_restore_current_thread restore_thread;
2845
2846 /* This is used to track whether we're resuming more than one
2847 target. */
2848 process_stratum_target *first_connection = nullptr;
2849
2850 /* The first inferior we see with a target that does not work in
2851 always-non-stop mode. */
2852 inferior *first_not_non_stop = nullptr;
2853
2854 for (inferior *inf : all_non_exited_inferiors (resume_target))
2855 {
2856 switch_to_inferior_no_thread (inf);
2857
55f6301a 2858 if (!target_has_execution ())
2f4fcf00
PA
2859 continue;
2860
2861 process_stratum_target *proc_target
2862 = current_inferior ()->process_target();
2863
2864 if (!target_is_non_stop_p ())
2865 first_not_non_stop = inf;
2866
2867 if (first_connection == nullptr)
2868 first_connection = proc_target;
2869 else if (first_connection != proc_target
2870 && first_not_non_stop != nullptr)
2871 {
2872 switch_to_inferior_no_thread (first_not_non_stop);
2873
2874 proc_target = current_inferior ()->process_target();
2875
2876 error (_("Connection %d (%s) does not support "
2877 "multi-target resumption."),
2878 proc_target->connection_number,
2879 make_target_connection_string (proc_target).c_str ());
2880 }
2881 }
2882 }
2883}
2884
c906108c
SS
2885/* Basic routine for continuing the program in various fashions.
2886
2887 ADDR is the address to resume at, or -1 for resume where stopped.
aff4e175
AB
2888 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
2889 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
c906108c
SS
2890
2891 You should call clear_proceed_status before calling proceed. */
2892
2893void
64ce06e4 2894proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 2895{
e58b0e63
PA
2896 struct regcache *regcache;
2897 struct gdbarch *gdbarch;
e58b0e63 2898 CORE_ADDR pc;
4d9d9d04
PA
2899 struct execution_control_state ecss;
2900 struct execution_control_state *ecs = &ecss;
c4464ade 2901 bool started;
c906108c 2902
e58b0e63
PA
2903 /* If we're stopped at a fork/vfork, follow the branch set by the
2904 "set follow-fork-mode" command; otherwise, we'll just proceed
2905 resuming the current thread. */
2906 if (!follow_fork ())
2907 {
2908 /* The target for some reason decided not to resume. */
2909 normal_stop ();
f148b27e 2910 if (target_can_async_p ())
b1a35af2 2911 inferior_event_handler (INF_EXEC_COMPLETE);
e58b0e63
PA
2912 return;
2913 }
2914
842951eb
PA
2915 /* We'll update this if & when we switch to a new thread. */
2916 previous_inferior_ptid = inferior_ptid;
2917
e58b0e63 2918 regcache = get_current_regcache ();
ac7936df 2919 gdbarch = regcache->arch ();
8b86c959
YQ
2920 const address_space *aspace = regcache->aspace ();
2921
fc75c28b
TBA
2922 pc = regcache_read_pc_protected (regcache);
2923
08036331 2924 thread_info *cur_thr = inferior_thread ();
e58b0e63 2925
99619bea 2926 /* Fill in with reasonable starting values. */
08036331 2927 init_thread_stepping_state (cur_thr);
99619bea 2928
08036331 2929 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
c2829269 2930
5b6d1e4f
PA
2931 ptid_t resume_ptid
2932 = user_visible_resume_ptid (cur_thr->control.stepping_command);
2933 process_stratum_target *resume_target
2934 = user_visible_resume_target (resume_ptid);
2935
2f4fcf00
PA
2936 check_multi_target_resumption (resume_target);
2937
2acceee2 2938 if (addr == (CORE_ADDR) -1)
c906108c 2939 {
08036331 2940 if (pc == cur_thr->suspend.stop_pc
af48d08f 2941 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 2942 && execution_direction != EXEC_REVERSE)
3352ef37
AC
2943 /* There is a breakpoint at the address we will resume at,
2944 step one instruction before inserting breakpoints so that
2945 we do not stop right away (and report a second hit at this
b2175913
MS
2946 breakpoint).
2947
2948 Note, we don't do this in reverse, because we won't
2949 actually be executing the breakpoint insn anyway.
2950 We'll be (un-)executing the previous instruction. */
08036331 2951 cur_thr->stepping_over_breakpoint = 1;
515630c5
UW
2952 else if (gdbarch_single_step_through_delay_p (gdbarch)
2953 && gdbarch_single_step_through_delay (gdbarch,
2954 get_current_frame ()))
3352ef37
AC
2955 /* We stepped onto an instruction that needs to be stepped
2956 again before re-inserting the breakpoint, do so. */
08036331 2957 cur_thr->stepping_over_breakpoint = 1;
c906108c
SS
2958 }
2959 else
2960 {
515630c5 2961 regcache_write_pc (regcache, addr);
c906108c
SS
2962 }
2963
70509625 2964 if (siggnal != GDB_SIGNAL_DEFAULT)
08036331 2965 cur_thr->suspend.stop_signal = siggnal;
70509625 2966
4d9d9d04
PA
2967 /* If an exception is thrown from this point on, make sure to
2968 propagate GDB's knowledge of the executing state to the
2969 frontend/user running state. */
5b6d1e4f 2970 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
4d9d9d04
PA
2971
2972 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
2973 threads (e.g., we might need to set threads stepping over
2974 breakpoints first), from the user/frontend's point of view, all
2975 threads in RESUME_PTID are now running. Unless we're calling an
2976 inferior function, as in that case we pretend the inferior
2977 doesn't run at all. */
08036331 2978 if (!cur_thr->control.in_infcall)
719546c4 2979 set_running (resume_target, resume_ptid, true);
17b2616c 2980
1eb8556f
SM
2981 infrun_debug_printf ("addr=%s, signal=%s", paddress (gdbarch, addr),
2982 gdb_signal_to_symbol_string (siggnal));
527159b7 2983
4d9d9d04
PA
2984 annotate_starting ();
2985
2986 /* Make sure that output from GDB appears before output from the
2987 inferior. */
2988 gdb_flush (gdb_stdout);
2989
d930703d
PA
2990 /* Since we've marked the inferior running, give it the terminal. A
2991 QUIT/Ctrl-C from here on is forwarded to the target (which can
2992 still detect attempts to unblock a stuck connection with repeated
2993 Ctrl-C from within target_pass_ctrlc). */
2994 target_terminal::inferior ();
2995
4d9d9d04
PA
2996 /* In a multi-threaded task we may select another thread and
2997 then continue or step.
2998
2999 But if a thread that we're resuming had stopped at a breakpoint,
3000 it will immediately cause another breakpoint stop without any
3001 execution (i.e. it will report a breakpoint hit incorrectly). So
3002 we must step over it first.
3003
3004 Look for threads other than the current (TP) that reported a
3005 breakpoint hit and haven't been resumed since then.
3006
3007 /* If scheduler locking applies, we can avoid iterating over all
3008 threads. */
08036331 3009 if (!non_stop && !schedlock_applies (cur_thr))
94cc34af 3010 {
5b6d1e4f
PA
3011 for (thread_info *tp : all_non_exited_threads (resume_target,
3012 resume_ptid))
08036331 3013 {
f3f8ece4
PA
3014 switch_to_thread_no_regs (tp);
3015
4d9d9d04
PA
3016 /* Ignore the current thread here. It's handled
3017 afterwards. */
08036331 3018 if (tp == cur_thr)
4d9d9d04 3019 continue;
c906108c 3020
4d9d9d04
PA
3021 if (!thread_still_needs_step_over (tp))
3022 continue;
3023
3024 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 3025
1eb8556f
SM
3026 infrun_debug_printf ("need to step-over [%s] first",
3027 target_pid_to_str (tp->ptid).c_str ());
99619bea 3028
4d9d9d04 3029 thread_step_over_chain_enqueue (tp);
2adfaa28 3030 }
f3f8ece4
PA
3031
3032 switch_to_thread (cur_thr);
30852783
UW
3033 }
3034
4d9d9d04
PA
3035 /* Enqueue the current thread last, so that we move all other
3036 threads over their breakpoints first. */
08036331
PA
3037 if (cur_thr->stepping_over_breakpoint)
3038 thread_step_over_chain_enqueue (cur_thr);
30852783 3039
4d9d9d04
PA
3040 /* If the thread isn't started, we'll still need to set its prev_pc,
3041 so that switch_back_to_stepped_thread knows the thread hasn't
3042 advanced. Must do this before resuming any thread, as in
3043 all-stop/remote, once we resume we can't send any other packet
3044 until the target stops again. */
fc75c28b 3045 cur_thr->prev_pc = regcache_read_pc_protected (regcache);
99619bea 3046
a9bc57b9
TT
3047 {
3048 scoped_restore save_defer_tc = make_scoped_defer_target_commit_resume ();
85ad3aaf 3049
a9bc57b9 3050 started = start_step_over ();
c906108c 3051
a9bc57b9
TT
3052 if (step_over_info_valid_p ())
3053 {
3054 /* Either this thread started a new in-line step over, or some
3055 other thread was already doing one. In either case, don't
3056 resume anything else until the step-over is finished. */
3057 }
3058 else if (started && !target_is_non_stop_p ())
3059 {
3060 /* A new displaced stepping sequence was started. In all-stop,
3061 we can't talk to the target anymore until it next stops. */
3062 }
3063 else if (!non_stop && target_is_non_stop_p ())
3064 {
3065 /* In all-stop, but the target is always in non-stop mode.
3066 Start all other threads that are implicitly resumed too. */
5b6d1e4f
PA
3067 for (thread_info *tp : all_non_exited_threads (resume_target,
3068 resume_ptid))
3069 {
3070 switch_to_thread_no_regs (tp);
3071
f9fac3c8
SM
3072 if (!tp->inf->has_execution ())
3073 {
1eb8556f
SM
3074 infrun_debug_printf ("[%s] target has no execution",
3075 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3076 continue;
3077 }
f3f8ece4 3078
f9fac3c8
SM
3079 if (tp->resumed)
3080 {
1eb8556f
SM
3081 infrun_debug_printf ("[%s] resumed",
3082 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3083 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
3084 continue;
3085 }
fbea99ea 3086
f9fac3c8
SM
3087 if (thread_is_in_step_over_chain (tp))
3088 {
1eb8556f
SM
3089 infrun_debug_printf ("[%s] needs step-over",
3090 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3091 continue;
3092 }
fbea99ea 3093
1eb8556f 3094 infrun_debug_printf ("resuming %s",
dda83cd7 3095 target_pid_to_str (tp->ptid).c_str ());
fbea99ea 3096
f9fac3c8
SM
3097 reset_ecs (ecs, tp);
3098 switch_to_thread (tp);
3099 keep_going_pass_signal (ecs);
3100 if (!ecs->wait_some_more)
3101 error (_("Command aborted."));
3102 }
a9bc57b9 3103 }
08036331 3104 else if (!cur_thr->resumed && !thread_is_in_step_over_chain (cur_thr))
a9bc57b9
TT
3105 {
3106 /* The thread wasn't started, and isn't queued, run it now. */
08036331
PA
3107 reset_ecs (ecs, cur_thr);
3108 switch_to_thread (cur_thr);
a9bc57b9
TT
3109 keep_going_pass_signal (ecs);
3110 if (!ecs->wait_some_more)
3111 error (_("Command aborted."));
3112 }
3113 }
c906108c 3114
5b6d1e4f 3115 commit_resume_all_targets ();
85ad3aaf 3116
731f534f 3117 finish_state.release ();
c906108c 3118
873657b9
PA
3119 /* If we've switched threads above, switch back to the previously
3120 current thread. We don't want the user to see a different
3121 selected thread. */
3122 switch_to_thread (cur_thr);
3123
0b333c5e
PA
3124 /* Tell the event loop to wait for it to stop. If the target
3125 supports asynchronous execution, it'll do this from within
3126 target_resume. */
362646f5 3127 if (!target_can_async_p ())
0b333c5e 3128 mark_async_event_handler (infrun_async_inferior_event_token);
c906108c 3129}
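/* Usage sketch (illustrative, not part of infrun.c): per the comment
   above `proceed', a continue-style command does roughly

     clear_proceed_status (0);
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);

   where -1 means "resume where stopped" and GDB_SIGNAL_DEFAULT means
   "act according to how the thread stopped".  */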
c906108c
SS
3130\f
3131
3132/* Start remote-debugging of a machine over a serial link. */
96baa820 3133
c906108c 3134void
8621d6a9 3135start_remote (int from_tty)
c906108c 3136{
5b6d1e4f
PA
3137 inferior *inf = current_inferior ();
3138 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3139
1777feb0 3140 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3141 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3142 indicate to wait_for_inferior that a target should time out if
6426a772
JM
3143 nothing is returned (instead of just blocking). Because of this,
3144 targets expecting an immediate response need to, internally, set
3145 things up so that the target_wait() is forced to eventually
1777feb0 3146 time out. */
6426a772
JM
3147 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3148 differentiate to its caller what the state of the target is after
3149 the initial open has been performed. Here we're assuming that
3150 the target has stopped. It should be possible to eventually have
3151 target_open() return to the caller an indication that the target
3152 is currently running and GDB state should be set to the same as
1777feb0 3153 for an async run. */
5b6d1e4f 3154 wait_for_inferior (inf);
8621d6a9
DJ
3155
3156 /* Now that the inferior has stopped, do any bookkeeping like
3157 loading shared libraries. We want to do this before normal_stop,
3158 so that the displayed frame is up to date. */
a7aba266 3159 post_create_inferior (from_tty);
8621d6a9 3160
6426a772 3161 normal_stop ();
c906108c
SS
3162}
3163
3164/* Initialize static vars when a new inferior begins. */
3165
3166void
96baa820 3167init_wait_for_inferior (void)
c906108c
SS
3168{
3169 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3170
c906108c
SS
3171 breakpoint_init_inferior (inf_starting);
3172
70509625 3173 clear_proceed_status (0);
9f976b41 3174
ab1ddbcf 3175 nullify_last_target_wait_ptid ();
237fc4c9 3176
842951eb 3177 previous_inferior_ptid = inferior_ptid;
c906108c 3178}
237fc4c9 3179
c906108c 3180\f
488f131b 3181
ec9499be 3182static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3183
568d6575
UW
3184static void handle_step_into_function (struct gdbarch *gdbarch,
3185 struct execution_control_state *ecs);
3186static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3187 struct execution_control_state *ecs);
4f5d7f63 3188static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3189static void check_exception_resume (struct execution_control_state *,
28106bc2 3190 struct frame_info *);
611c83ae 3191
bdc36728 3192static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3193static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3194static void keep_going (struct execution_control_state *ecs);
94c57d6a 3195static void process_event_stop_test (struct execution_control_state *ecs);
c4464ade 3196static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3197
252fbfc8
PA
3198/* This function is attached as a "thread_stop_requested" observer.
3199 Clean up local state that assumed the PTID was to be resumed, and
3200 report the stop to the frontend. */
3201
2c0b251b 3202static void
252fbfc8
PA
3203infrun_thread_stop_requested (ptid_t ptid)
3204{
5b6d1e4f
PA
3205 process_stratum_target *curr_target = current_inferior ()->process_target ();
3206
c65d6b55
PA
3207 /* PTID was requested to stop. If the thread was already stopped,
3208 but the user/frontend doesn't know about that yet (e.g., the
3209 thread had been temporarily paused for some step-over), set up
3210 for reporting the stop now. */
5b6d1e4f 3211 for (thread_info *tp : all_threads (curr_target, ptid))
08036331
PA
3212 {
3213 if (tp->state != THREAD_RUNNING)
3214 continue;
3215 if (tp->executing)
3216 continue;
c65d6b55 3217
08036331
PA
3218 /* Remove matching threads from the step-over queue, so
3219 start_step_over doesn't try to resume them
3220 automatically. */
3221 if (thread_is_in_step_over_chain (tp))
3222 thread_step_over_chain_remove (tp);
c65d6b55 3223
08036331
PA
3224 /* If the thread is stopped, but the user/frontend doesn't
3225 know about that yet, queue a pending event, as if the
3226 thread had just stopped now. Unless the thread already had
3227 a pending event. */
3228 if (!tp->suspend.waitstatus_pending_p)
3229 {
3230 tp->suspend.waitstatus_pending_p = 1;
3231 tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
3232 tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
3233 }
c65d6b55 3234
08036331
PA
3235 /* Clear the inline-frame state, since we're re-processing the
3236 stop. */
5b6d1e4f 3237 clear_inline_frame_state (tp);
c65d6b55 3238
08036331
PA
3239 /* If this thread was paused because some other thread was
3240 doing an inline-step over, let that finish first. Once
3241 that happens, we'll restart all threads and consume pending
3242 stop events then. */
3243 if (step_over_info_valid_p ())
3244 continue;
3245
3246 /* Otherwise we can process the (new) pending event now. Set
3247 it so this pending event is considered by
3248 do_target_wait. */
719546c4 3249 tp->resumed = true;
08036331 3250 }
252fbfc8
PA
3251}
3252
a07daef3
PA
3253static void
3254infrun_thread_thread_exit (struct thread_info *tp, int silent)
3255{
5b6d1e4f
PA
3256 if (target_last_proc_target == tp->inf->process_target ()
3257 && target_last_wait_ptid == tp->ptid)
a07daef3
PA
3258 nullify_last_target_wait_ptid ();
3259}
3260
0cbcdb96
PA
3261/* Delete the step resume, single-step and longjmp/exception resume
3262 breakpoints of TP. */
4e1c45ea 3263
0cbcdb96
PA
3264static void
3265delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3266{
0cbcdb96
PA
3267 delete_step_resume_breakpoint (tp);
3268 delete_exception_resume_breakpoint (tp);
34b7e8a6 3269 delete_single_step_breakpoints (tp);
4e1c45ea
PA
3270}
3271
0cbcdb96
PA
3272/* If the target still has execution, call FUNC for each thread that
3273 just stopped. In all-stop, that's all the non-exited threads; in
3274 non-stop, that's the current thread, only. */
3275
3276typedef void (*for_each_just_stopped_thread_callback_func)
3277 (struct thread_info *tp);
4e1c45ea
PA
3278
3279static void
0cbcdb96 3280for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3281{
55f6301a 3282 if (!target_has_execution () || inferior_ptid == null_ptid)
4e1c45ea
PA
3283 return;
3284
fbea99ea 3285 if (target_is_non_stop_p ())
4e1c45ea 3286 {
0cbcdb96
PA
3287 /* If in non-stop mode, only the current thread stopped. */
3288 func (inferior_thread ());
4e1c45ea
PA
3289 }
3290 else
0cbcdb96 3291 {
0cbcdb96 3292 /* In all-stop mode, all threads have stopped. */
08036331
PA
3293 for (thread_info *tp : all_non_exited_threads ())
3294 func (tp);
0cbcdb96
PA
3295 }
3296}
3297
3298/* Delete the step resume and longjmp/exception resume breakpoints of
3299 the threads that just stopped. */
3300
3301static void
3302delete_just_stopped_threads_infrun_breakpoints (void)
3303{
3304 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
34b7e8a6
PA
3305}
3306
3307/* Delete the single-step breakpoints of the threads that just
3308 stopped. */
7c16b83e 3309
34b7e8a6
PA
3310static void
3311delete_just_stopped_threads_single_step_breakpoints (void)
3312{
3313 for_each_just_stopped_thread (delete_single_step_breakpoints);
4e1c45ea
PA
3314}
3315
221e1a37 3316/* See infrun.h. */
223698f8 3317
221e1a37 3318void
223698f8
DE
3319print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3320 const struct target_waitstatus *ws)
3321{
23fdd69e 3322 std::string status_string = target_waitstatus_to_string (ws);
d7e74731 3323 string_file stb;
223698f8
DE
3324
3325 /* The text is split over several lines because it was getting too long.
3326 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3327 output as a unit; we want only one timestamp printed if debug_timestamp
3328 is set. */
3329
1eb8556f 3330 stb.printf ("[infrun] target_wait (%d.%ld.%ld",
e99b03dc 3331 waiton_ptid.pid (),
e38504b3 3332 waiton_ptid.lwp (),
cc6bcb54 3333 waiton_ptid.tid ());
e99b03dc 3334 if (waiton_ptid.pid () != -1)
a068643d 3335 stb.printf (" [%s]", target_pid_to_str (waiton_ptid).c_str ());
d7e74731 3336 stb.printf (", status) =\n");
1eb8556f 3337 stb.printf ("[infrun] %d.%ld.%ld [%s],\n",
e99b03dc 3338 result_ptid.pid (),
e38504b3 3339 result_ptid.lwp (),
cc6bcb54 3340 result_ptid.tid (),
a068643d 3341 target_pid_to_str (result_ptid).c_str ());
1eb8556f 3342 stb.printf ("[infrun] %s\n", status_string.c_str ());
223698f8
DE
3343
3344 /* This uses %s in part to handle %'s in the text, but also to avoid
3345 a gcc error: the format attribute requires a string literal. */
d7e74731 3346 fprintf_unfiltered (gdb_stdlog, "%s", stb.c_str ());
223698f8
DE
3347}
3348
372316f1
PA
3349/* Select a thread at random, out of those which are resumed and have
3350 had events. */
3351
3352static struct thread_info *
5b6d1e4f 3353random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
372316f1 3354{
372316f1 3355 int num_events = 0;
08036331 3356
5b6d1e4f 3357 auto has_event = [&] (thread_info *tp)
08036331 3358 {
5b6d1e4f
PA
3359 return (tp->ptid.matches (waiton_ptid)
3360 && tp->resumed
08036331
PA
3361 && tp->suspend.waitstatus_pending_p);
3362 };
372316f1
PA
3363
3364 /* First see how many events we have. Count only resumed threads
3365 that have an event pending. */
5b6d1e4f 3366 for (thread_info *tp : inf->non_exited_threads ())
08036331 3367 if (has_event (tp))
372316f1
PA
3368 num_events++;
3369
3370 if (num_events == 0)
3371 return NULL;
3372
3373 /* Now randomly pick a thread out of those that have had events. */
08036331
PA
3374 int random_selector = (int) ((num_events * (double) rand ())
3375 / (RAND_MAX + 1.0));
372316f1 3376
1eb8556f
SM
3377 if (num_events > 1)
3378 infrun_debug_printf ("Found %d events, selecting #%d",
3379 num_events, random_selector);
372316f1
PA
3380
3381 /* Select the Nth thread that has had an event. */
5b6d1e4f 3382 for (thread_info *tp : inf->non_exited_threads ())
08036331 3383 if (has_event (tp))
372316f1 3384 if (random_selector-- == 0)
08036331 3385 return tp;
372316f1 3386
08036331 3387 gdb_assert_not_reached ("event thread not found");
372316f1
PA
3388}
3389
3390/* Wrapper for target_wait that first checks whether threads have
3391 pending statuses to report before actually asking the target for
5b6d1e4f
PA
3392 more events. INF is the inferior we're using to call target_wait
3393 on. */
372316f1
PA
3394
3395static ptid_t
5b6d1e4f 3396do_target_wait_1 (inferior *inf, ptid_t ptid,
b60cea74 3397 target_waitstatus *status, target_wait_flags options)
372316f1
PA
3398{
3399 ptid_t event_ptid;
3400 struct thread_info *tp;
3401
24ed6739
AB
3402 /* We know that we are looking for an event in the target of inferior
3403 INF, but we don't know which thread the event might come from. As
3404 such we want to make sure that INFERIOR_PTID is reset so that none of
3405 the wait code relies on it - doing so is always a mistake. */
3406 switch_to_inferior_no_thread (inf);
3407
372316f1
PA
3408 /* First check if there is a resumed thread with a wait status
3409 pending. */
d7e15655 3410 if (ptid == minus_one_ptid || ptid.is_pid ())
372316f1 3411 {
5b6d1e4f 3412 tp = random_pending_event_thread (inf, ptid);
372316f1
PA
3413 }
3414 else
3415 {
1eb8556f
SM
3416 infrun_debug_printf ("Waiting for specific thread %s.",
3417 target_pid_to_str (ptid).c_str ());
372316f1
PA
3418
3419 /* We have a specific thread to check. */
5b6d1e4f 3420 tp = find_thread_ptid (inf, ptid);
372316f1
PA
3421 gdb_assert (tp != NULL);
3422 if (!tp->suspend.waitstatus_pending_p)
3423 tp = NULL;
3424 }
3425
3426 if (tp != NULL
3427 && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3428 || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
3429 {
00431a78 3430 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 3431 struct gdbarch *gdbarch = regcache->arch ();
372316f1
PA
3432 CORE_ADDR pc;
3433 int discard = 0;
3434
3435 pc = regcache_read_pc (regcache);
3436
3437 if (pc != tp->suspend.stop_pc)
3438 {
1eb8556f
SM
3439 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
3440 target_pid_to_str (tp->ptid).c_str (),
3441 paddress (gdbarch, tp->suspend.stop_pc),
3442 paddress (gdbarch, pc));
372316f1
PA
3443 discard = 1;
3444 }
a01bda52 3445 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
372316f1 3446 {
1eb8556f
SM
3447 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
3448 target_pid_to_str (tp->ptid).c_str (),
3449 paddress (gdbarch, pc));
372316f1
PA
3450
3451 discard = 1;
3452 }
3453
3454 if (discard)
3455 {
1eb8556f
SM
3456 infrun_debug_printf ("pending event of %s cancelled.",
3457 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3458
3459 tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
3460 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3461 }
3462 }
3463
3464 if (tp != NULL)
3465 {
1eb8556f
SM
3466 infrun_debug_printf ("Using pending wait status %s for %s.",
3467 target_waitstatus_to_string
3468 (&tp->suspend.waitstatus).c_str (),
3469 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3470
3471 /* Now that we've selected our final event LWP, un-adjust its PC
3472 if it was a software breakpoint (and the target doesn't
3473 always adjust the PC itself). */
3474 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3475 && !target_supports_stopped_by_sw_breakpoint ())
3476 {
3477 struct regcache *regcache;
3478 struct gdbarch *gdbarch;
3479 int decr_pc;
3480
00431a78 3481 regcache = get_thread_regcache (tp);
ac7936df 3482 gdbarch = regcache->arch ();
372316f1
PA
3483
3484 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3485 if (decr_pc != 0)
3486 {
3487 CORE_ADDR pc;
3488
3489 pc = regcache_read_pc (regcache);
3490 regcache_write_pc (regcache, pc + decr_pc);
3491 }
3492 }
3493
3494 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3495 *status = tp->suspend.waitstatus;
3496 tp->suspend.waitstatus_pending_p = 0;
3497
3498 /* Wake up the event loop again, until all pending events are
3499 processed. */
3500 if (target_is_async_p ())
3501 mark_async_event_handler (infrun_async_inferior_event_token);
3502 return tp->ptid;
3503 }
3504
3505 /* But if we don't find one, we'll have to wait. */
3506
d3a07122
SM
3507 /* We can't ask a non-async target to do a non-blocking wait, so this will be
3508 a blocking wait. */
3509 if (!target_can_async_p ())
3510 options &= ~TARGET_WNOHANG;
3511
372316f1
PA
3512 if (deprecated_target_wait_hook)
3513 event_ptid = deprecated_target_wait_hook (ptid, status, options);
3514 else
3515 event_ptid = target_wait (ptid, status, options);
3516
3517 return event_ptid;
3518}
3519
5b6d1e4f
PA
3520/* Wrapper for target_wait that first checks whether threads have
3521 pending statuses to report before actually asking the target for
b3e3a4c1 3522 more events. Polls for events from all inferiors/targets. */
5b6d1e4f
PA
3523
3524static bool
b60cea74
TT
3525do_target_wait (ptid_t wait_ptid, execution_control_state *ecs,
3526 target_wait_flags options)
5b6d1e4f
PA
3527{
3528 int num_inferiors = 0;
3529 int random_selector;
3530
b3e3a4c1
SM
3531 /* For fairness, we pick the first inferior/target to poll at random
3532 out of all inferiors that may report events, and then continue
3533 polling the rest of the inferior list starting from that one in a
3534 circular fashion until the whole list is polled once. */
5b6d1e4f
PA
3535
3536 auto inferior_matches = [&wait_ptid] (inferior *inf)
3537 {
3538 return (inf->process_target () != NULL
5b6d1e4f
PA
3539 && ptid_t (inf->pid).matches (wait_ptid));
3540 };
3541
b3e3a4c1 3542 /* First see how many matching inferiors we have. */
5b6d1e4f
PA
3543 for (inferior *inf : all_inferiors ())
3544 if (inferior_matches (inf))
3545 num_inferiors++;
3546
3547 if (num_inferiors == 0)
3548 {
3549 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3550 return false;
3551 }
3552
b3e3a4c1 3553 /* Now randomly pick an inferior out of those that matched. */
5b6d1e4f
PA
3554 random_selector = (int)
3555 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
3556
1eb8556f
SM
3557 if (num_inferiors > 1)
3558 infrun_debug_printf ("Found %d inferiors, starting at #%d",
3559 num_inferiors, random_selector);
5b6d1e4f 3560
b3e3a4c1 3561 /* Select the Nth inferior that matched. */
5b6d1e4f
PA
3562
3563 inferior *selected = nullptr;
3564
3565 for (inferior *inf : all_inferiors ())
3566 if (inferior_matches (inf))
3567 if (random_selector-- == 0)
3568 {
3569 selected = inf;
3570 break;
3571 }
3572
b3e3a4c1 3573 /* Now poll for events out of each of the matching inferior's
5b6d1e4f
PA
3574 targets, starting from the selected one. */
3575
3576 auto do_wait = [&] (inferior *inf)
3577 {
5b6d1e4f
PA
3578 ecs->ptid = do_target_wait_1 (inf, wait_ptid, &ecs->ws, options);
3579 ecs->target = inf->process_target ();
3580 return (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
3581 };
3582
b3e3a4c1
SM
3583 /* Needed in 'all-stop + target-non-stop' mode, because we end up
3584 here spuriously after the target is all stopped and we've already
5b6d1e4f
PA
3585 reported the stop to the user, polling for events. */
3586 scoped_restore_current_thread restore_thread;
3587
3588 int inf_num = selected->num;
3589 for (inferior *inf = selected; inf != NULL; inf = inf->next)
3590 if (inferior_matches (inf))
3591 if (do_wait (inf))
3592 return true;
3593
3594 for (inferior *inf = inferior_list;
3595 inf != NULL && inf->num < inf_num;
3596 inf = inf->next)
3597 if (inferior_matches (inf))
3598 if (do_wait (inf))
3599 return true;
3600
3601 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3602 return false;
3603}
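/* The fairness scheme used above, reduced to its core (illustrative,
   standalone sketch -- not GDB code; the names below are invented for
   the example): pick a random starting index, then visit every
   element exactly once, wrapping around.  */
#include <stdlib.h>

static void
visit_round_robin (int n, void (*visit) (int))
{
  /* A roughly uniform starting index in [0, n).  */
  int start = (int) ((n * (double) rand ()) / (RAND_MAX + 1.0));

  for (int i = 0; i < n; i++)
    visit ((start + i) % n);
}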
3604
24291992
PA
3605/* Prepare and stabilize the inferior for detaching it. E.g.,
3606 detaching while a thread is displaced stepping is a recipe for
3607 crashing it, as nothing would readjust the PC out of the scratch
3608 pad. */
3609
3610void
3611prepare_for_detach (void)
3612{
3613 struct inferior *inf = current_inferior ();
f2907e49 3614 ptid_t pid_ptid = ptid_t (inf->pid);
24291992 3615
f5f01699 3616 displaced_step_inferior_state *displaced = &inf->displaced_step_state;
24291992
PA
3617
3618 /* Is any thread of this process displaced stepping? If not,
3619 there's nothing else to do. */
d20172fc 3620 if (displaced->step_thread == nullptr)
24291992
PA
3621 return;
3622
1eb8556f 3623 infrun_debug_printf ("displaced-stepping in-process while detaching");
24291992 3624
9bcb1f16 3625 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
24291992 3626
00431a78 3627 while (displaced->step_thread != nullptr)
24291992 3628 {
24291992
PA
3629 struct execution_control_state ecss;
3630 struct execution_control_state *ecs;
3631
3632 ecs = &ecss;
3633 memset (ecs, 0, sizeof (*ecs));
3634
3635 overlay_cache_invalid = 1;
f15cb84a
YQ
3636 /* Flush target cache before starting to handle each event.
3637 Target was running and cache could be stale. This is just a
3638 heuristic. Running threads may modify target memory, but we
3639 don't get any event. */
3640 target_dcache_invalidate ();
24291992 3641
5b6d1e4f 3642 do_target_wait (pid_ptid, ecs, 0);
24291992
PA
3643
3644 if (debug_infrun)
3645 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3646
3647 /* If an error happens while handling the event, propagate GDB's
3648 knowledge of the executing state to the frontend/user running
3649 state. */
5b6d1e4f
PA
3650 scoped_finish_thread_state finish_state (inf->process_target (),
3651 minus_one_ptid);
24291992
PA
3652
3653 /* Now figure out what to do with the result of the wait. */
3654 handle_inferior_event (ecs);
3655
3656 /* No error, don't finish the state yet. */
731f534f 3657 finish_state.release ();
24291992
PA
3658
3659 /* Breakpoints and watchpoints are not installed on the target
3660 at this point, and signals are passed directly to the
3661 inferior, so this must mean the process is gone. */
3662 if (!ecs->wait_some_more)
3663 {
9bcb1f16 3664 restore_detaching.release ();
24291992
PA
3665 error (_("Program exited while detaching"));
3666 }
3667 }
3668
9bcb1f16 3669 restore_detaching.release ();
24291992
PA
3670}
3671
cd0fc7c3 3672/* Wait for control to return from inferior to debugger.
ae123ec6 3673
cd0fc7c3
SS
3674 If inferior gets a signal, we may decide to start it up again
3675 instead of returning. That is why there is a loop in this function.
3676 When this function actually returns it means the inferior
3677 should be left stopped and GDB should read more commands. */
3678
5b6d1e4f
PA
3679static void
3680wait_for_inferior (inferior *inf)
cd0fc7c3 3681{
1eb8556f 3682 infrun_debug_printf ("wait_for_inferior ()");
527159b7 3683
4c41382a 3684 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
cd0fc7c3 3685
e6f5c25b
PA
3686 /* If an error happens while handling the event, propagate GDB's
3687 knowledge of the executing state to the frontend/user running
3688 state. */
5b6d1e4f
PA
3689 scoped_finish_thread_state finish_state
3690 (inf->process_target (), minus_one_ptid);
e6f5c25b 3691
c906108c
SS
3692 while (1)
3693 {
ae25568b
PA
3694 struct execution_control_state ecss;
3695 struct execution_control_state *ecs = &ecss;
29f49a6a 3696
ae25568b
PA
3697 memset (ecs, 0, sizeof (*ecs));
3698
ec9499be 3699 overlay_cache_invalid = 1;
ec9499be 3700
f15cb84a
YQ
3701 /* Flush target cache before starting to handle each event.
3702 Target was running and cache could be stale. This is just a
3703 heuristic. Running threads may modify target memory, but we
3704 don't get any event. */
3705 target_dcache_invalidate ();
3706
5b6d1e4f
PA
3707 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
3708 ecs->target = inf->process_target ();
c906108c 3709
f00150c9 3710 if (debug_infrun)
5b6d1e4f 3711 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
f00150c9 3712
cd0fc7c3
SS
3713 /* Now figure out what to do with the result of the wait. */
3714 handle_inferior_event (ecs);
c906108c 3715
cd0fc7c3
SS
3716 if (!ecs->wait_some_more)
3717 break;
3718 }
4e1c45ea 3719
e6f5c25b 3720 /* No error, don't finish the state yet. */
731f534f 3721 finish_state.release ();
cd0fc7c3 3722}
c906108c 3723
d3d4baed
PA
3724/* Cleanup that reinstalls the readline callback handler, if the
3725 target is running in the background. If while handling the target
3726 event something triggered a secondary prompt, like e.g., a
3727 pagination prompt, we'll have removed the callback handler (see
3728 gdb_readline_wrapper_line). Need to do this as we go back to the
3729 event loop, ready to process further input. Note this has no
3730 effect if the handler hasn't actually been removed, because calling
3731 rl_callback_handler_install resets the line buffer, thus losing
3732 input. */
3733
3734static void
d238133d 3735reinstall_readline_callback_handler_cleanup ()
d3d4baed 3736{
3b12939d
PA
3737 struct ui *ui = current_ui;
3738
3739 if (!ui->async)
6c400b59
PA
3740 {
3741 /* We're not going back to the top level event loop yet. Don't
3742 install the readline callback, as it'd prep the terminal,
3743 readline-style (raw, noecho) (e.g., --batch). We'll install
3744 it the next time the prompt is displayed, when we're ready
3745 for input. */
3746 return;
3747 }
3748
3b12939d 3749 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
d3d4baed
PA
3750 gdb_rl_callback_handler_reinstall ();
3751}
3752
243a9253
PA
3753/* Clean up the FSMs of threads that are now stopped. In non-stop,
3754 that's just the event thread. In all-stop, that's all threads. */
3755
3756static void
3757clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
3758{
08036331
PA
3759 if (ecs->event_thread != NULL
3760 && ecs->event_thread->thread_fsm != NULL)
46e3ed7f 3761 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
243a9253
PA
3762
3763 if (!non_stop)
3764 {
08036331 3765 for (thread_info *thr : all_non_exited_threads ())
dda83cd7 3766 {
243a9253
PA
3767 if (thr->thread_fsm == NULL)
3768 continue;
3769 if (thr == ecs->event_thread)
3770 continue;
3771
00431a78 3772 switch_to_thread (thr);
46e3ed7f 3773 thr->thread_fsm->clean_up (thr);
243a9253
PA
3774 }
3775
3776 if (ecs->event_thread != NULL)
00431a78 3777 switch_to_thread (ecs->event_thread);
243a9253
PA
3778 }
3779}
3780
3b12939d
PA
3781/* Helper for all_uis_check_sync_execution_done that works on the
3782 current UI. */
3783
3784static void
3785check_curr_ui_sync_execution_done (void)
3786{
3787 struct ui *ui = current_ui;
3788
3789 if (ui->prompt_state == PROMPT_NEEDED
3790 && ui->async
3791 && !gdb_in_secondary_prompt_p (ui))
3792 {
223ffa71 3793 target_terminal::ours ();
76727919 3794 gdb::observers::sync_execution_done.notify ();
3eb7562a 3795 ui_register_input_event_handler (ui);
3b12939d
PA
3796 }
3797}
3798
3799/* See infrun.h. */
3800
3801void
3802all_uis_check_sync_execution_done (void)
3803{
0e454242 3804 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
3805 {
3806 check_curr_ui_sync_execution_done ();
3807 }
3808}
3809
a8836c93
PA
3810/* See infrun.h. */
3811
3812void
3813all_uis_on_sync_execution_starting (void)
3814{
0e454242 3815 SWITCH_THRU_ALL_UIS ()
a8836c93
PA
3816 {
3817 if (current_ui->prompt_state == PROMPT_NEEDED)
3818 async_disable_stdin ();
3819 }
3820}
3821
1777feb0 3822/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 3823 event loop whenever a change of state is detected on the file
1777feb0
MS
3824 descriptor corresponding to the target. It can be called more than
3825 once to complete a single execution command. In such cases we need
3826 to keep the state in a global variable ECSS. If it is the last time
a474d7c2
PA
3827 that this function is called for a single execution command, then
3828 report to the user that the inferior has stopped, and do the
1777feb0 3829 necessary cleanups. */
43ff13b4
JM
3830
3831void
b1a35af2 3832fetch_inferior_event ()
43ff13b4 3833{
0d1e5fa7 3834 struct execution_control_state ecss;
a474d7c2 3835 struct execution_control_state *ecs = &ecss;
0f641c01 3836 int cmd_done = 0;
43ff13b4 3837
0d1e5fa7
PA
3838 memset (ecs, 0, sizeof (*ecs));
3839
c61db772
PA
3840 /* Events are always processed with the main UI as current UI. This
3841 way, warnings, debug output, etc. are always consistently sent to
3842 the main console. */
4b6749b9 3843 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
c61db772 3844
b78b3a29
TBA
3845 /* Temporarily disable pagination. Otherwise, the user would be
3846 given an option to press 'q' to quit, which would cause an early
3847 exit and could leave GDB in a half-baked state. */
3848 scoped_restore save_pagination
3849 = make_scoped_restore (&pagination_enabled, false);
3850
d3d4baed 3851 /* End up with readline processing input, if necessary. */
d238133d
TT
3852 {
3853 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
3854
3855 /* We're handling a live event, so make sure we're doing live
3856 debugging. If we're looking at traceframes while the target is
3857 running, we're going to need to get back to that mode after
3858 handling the event. */
3859 gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
3860 if (non_stop)
3861 {
3862 maybe_restore_traceframe.emplace ();
3863 set_current_traceframe (-1);
3864 }
43ff13b4 3865
873657b9
PA
3866 /* The user/frontend should not notice a thread switch due to
3867 internal events. Make sure we revert to the user selected
3868 thread and frame after handling the event and running any
3869 breakpoint commands. */
3870 scoped_restore_current_thread restore_thread;
d238133d
TT
3871
3872 overlay_cache_invalid = 1;
3873 /* Flush target cache before starting to handle each event. Target
3874 was running and cache could be stale. This is just a heuristic.
3875 Running threads may modify target memory, but we don't get any
3876 event. */
3877 target_dcache_invalidate ();
3878
3879 scoped_restore save_exec_dir
3880 = make_scoped_restore (&execution_direction,
3881 target_execution_direction ());
3882
5b6d1e4f
PA
3883 if (!do_target_wait (minus_one_ptid, ecs, TARGET_WNOHANG))
3884 return;
3885
3886 gdb_assert (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
3887
3888 /* Switch to the target that generated the event, so we can do
7f08fd51
TBA
3889 target calls. */
3890 switch_to_target_no_thread (ecs->target);
d238133d
TT
3891
3892 if (debug_infrun)
5b6d1e4f 3893 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
d238133d
TT
3894
3895 /* If an error happens while handling the event, propagate GDB's
3896 knowledge of the executing state to the frontend/user running
3897 state. */
3898 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
5b6d1e4f 3899 scoped_finish_thread_state finish_state (ecs->target, finish_ptid);
d238133d 3900
979a0d13 3901 /* These get executed before the scoped_restore_current_thread above, so
d238133d
TT
 3902 that they still apply to the thread which has thrown the exception. */
3903 auto defer_bpstat_clear
3904 = make_scope_exit (bpstat_clear_actions);
3905 auto defer_delete_threads
3906 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
3907
3908 /* Now figure out what to do with the result of the wait. */
3909 handle_inferior_event (ecs);
3910
3911 if (!ecs->wait_some_more)
3912 {
5b6d1e4f 3913 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
758cb810 3914 bool should_stop = true;
d238133d 3915 struct thread_info *thr = ecs->event_thread;
d6b48e9c 3916
d238133d 3917 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 3918
d238133d
TT
3919 if (thr != NULL)
3920 {
3921 struct thread_fsm *thread_fsm = thr->thread_fsm;
243a9253 3922
d238133d 3923 if (thread_fsm != NULL)
46e3ed7f 3924 should_stop = thread_fsm->should_stop (thr);
d238133d 3925 }
243a9253 3926
d238133d
TT
3927 if (!should_stop)
3928 {
3929 keep_going (ecs);
3930 }
3931 else
3932 {
46e3ed7f 3933 bool should_notify_stop = true;
d238133d 3934 int proceeded = 0;
1840d81a 3935
d238133d 3936 clean_up_just_stopped_threads_fsms (ecs);
243a9253 3937
d238133d 3938 if (thr != NULL && thr->thread_fsm != NULL)
46e3ed7f 3939 should_notify_stop = thr->thread_fsm->should_notify_stop ();
388a7084 3940
d238133d
TT
3941 if (should_notify_stop)
3942 {
3943 /* We may not find an inferior if this was a process exit. */
3944 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
3945 proceeded = normal_stop ();
3946 }
243a9253 3947
d238133d
TT
3948 if (!proceeded)
3949 {
b1a35af2 3950 inferior_event_handler (INF_EXEC_COMPLETE);
d238133d
TT
3951 cmd_done = 1;
3952 }
873657b9
PA
3953
3954 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
3955 previously selected thread is gone. We have two
3956 choices - switch to no thread selected, or restore the
3957 previously selected thread (now exited). We chose the
3958 latter, just because that's what GDB used to do. After
3959 this, "info threads" says "The current thread <Thread
3960 ID 2> has terminated." instead of "No thread
3961 selected.". */
3962 if (!non_stop
3963 && cmd_done
3964 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
3965 restore_thread.dont_restore ();
d238133d
TT
3966 }
3967 }
4f8d22e3 3968
d238133d
TT
3969 defer_delete_threads.release ();
3970 defer_bpstat_clear.release ();
29f49a6a 3971
d238133d
TT
3972 /* No error, don't finish the thread states yet. */
3973 finish_state.release ();
731f534f 3974
d238133d
TT
3975 /* This scope is used to ensure that readline callbacks are
3976 reinstalled here. */
3977 }
4f8d22e3 3978
3b12939d
PA
3979 /* If a UI was in sync execution mode, and now isn't, restore its
3980 prompt (a synchronous execution command has finished, and we're
3981 ready for input). */
3982 all_uis_check_sync_execution_done ();
0f641c01
PA
3983
3984 if (cmd_done
0f641c01 3985 && exec_done_display_p
00431a78
PA
3986 && (inferior_ptid == null_ptid
3987 || inferior_thread ()->state != THREAD_RUNNING))
0f641c01 3988 printf_unfiltered (_("completed.\n"));
43ff13b4
JM
3989}
3990
29734269
SM
3991/* See infrun.h. */
3992
edb3359d 3993void
29734269
SM
3994set_step_info (thread_info *tp, struct frame_info *frame,
3995 struct symtab_and_line sal)
edb3359d 3996{
29734269
SM
3997 /* This can be removed once this function no longer implicitly relies on the
3998 inferior_ptid value. */
3999 gdb_assert (inferior_ptid == tp->ptid);
edb3359d 4000
16c381f0
JK
4001 tp->control.step_frame_id = get_frame_id (frame);
4002 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
edb3359d
DJ
4003
4004 tp->current_symtab = sal.symtab;
4005 tp->current_line = sal.line;
4006}
4007
0d1e5fa7
PA
4008/* Clear context switchable stepping state. */
4009
4010void
4e1c45ea 4011init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 4012{
7f5ef605 4013 tss->stepped_breakpoint = 0;
0d1e5fa7 4014 tss->stepping_over_breakpoint = 0;
963f9c80 4015 tss->stepping_over_watchpoint = 0;
0d1e5fa7 4016 tss->step_after_step_resume_breakpoint = 0;
cd0fc7c3
SS
4017}
4018
ab1ddbcf 4019/* See infrun.h. */
c32c64b7 4020
6efcd9a8 4021void
5b6d1e4f
PA
4022set_last_target_status (process_stratum_target *target, ptid_t ptid,
4023 target_waitstatus status)
c32c64b7 4024{
5b6d1e4f 4025 target_last_proc_target = target;
c32c64b7
DE
4026 target_last_wait_ptid = ptid;
4027 target_last_waitstatus = status;
4028}
4029
ab1ddbcf 4030/* See infrun.h. */
e02bc4cc
DS
4031
4032void
5b6d1e4f
PA
4033get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4034 target_waitstatus *status)
e02bc4cc 4035{
5b6d1e4f
PA
4036 if (target != nullptr)
4037 *target = target_last_proc_target;
ab1ddbcf
PA
4038 if (ptid != nullptr)
4039 *ptid = target_last_wait_ptid;
4040 if (status != nullptr)
4041 *status = target_last_waitstatus;
e02bc4cc
DS
4042}
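/* Usage sketch (illustrative): each out parameter may be passed as
   nullptr when the caller does not need it, e.g.

     process_stratum_target *target;
     ptid_t ptid;
     get_last_target_status (&target, &ptid, nullptr);  */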
4043
ab1ddbcf
PA
4044/* See infrun.h. */
4045
ac264b3b
MS
4046void
4047nullify_last_target_wait_ptid (void)
4048{
5b6d1e4f 4049 target_last_proc_target = nullptr;
ac264b3b 4050 target_last_wait_ptid = minus_one_ptid;
ab1ddbcf 4051 target_last_waitstatus = {};
ac264b3b
MS
4052}
4053
dcf4fbde 4054/* Switch thread contexts. */
dd80620e
MS
4055
4056static void
00431a78 4057context_switch (execution_control_state *ecs)
dd80620e 4058{
1eb8556f 4059 if (ecs->ptid != inferior_ptid
5b6d1e4f
PA
4060 && (inferior_ptid == null_ptid
4061 || ecs->event_thread != inferior_thread ()))
fd48f117 4062 {
1eb8556f
SM
4063 infrun_debug_printf ("Switching context from %s to %s",
4064 target_pid_to_str (inferior_ptid).c_str (),
4065 target_pid_to_str (ecs->ptid).c_str ());
fd48f117
DJ
4066 }
4067
00431a78 4068 switch_to_thread (ecs->event_thread);
dd80620e
MS
4069}
4070
d8dd4d5f
PA
4071/* If the target can't tell whether we've hit breakpoints
4072 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4073 check whether that could have been caused by a breakpoint. If so,
4074 adjust the PC, per gdbarch_decr_pc_after_break. */
4075
4fa8626c 4076static void
d8dd4d5f
PA
4077adjust_pc_after_break (struct thread_info *thread,
4078 struct target_waitstatus *ws)
4fa8626c 4079{
24a73cce
UW
4080 struct regcache *regcache;
4081 struct gdbarch *gdbarch;
118e6252 4082 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 4083
4fa8626c
DJ
4084 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4085 we aren't, just return.
9709f61c
DJ
4086
4087 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
b798847d
UW
4088 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4089 implemented by software breakpoints should be handled through the normal
4090 breakpoint layer.
8fb3e588 4091
4fa8626c
DJ
4092 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4093 different signals (SIGILL or SIGEMT for instance), but it is less
4094 clear where the PC is pointing afterwards. It may not match
b798847d
UW
4095 gdbarch_decr_pc_after_break. I don't know any specific target that
4096 generates these signals at breakpoints (the code has been in GDB since at
4097 least 1992) so I can not guess how to handle them here.
8fb3e588 4098
e6cf7916
UW
4099 In earlier versions of GDB, a target with
4100 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
b798847d
UW
4101 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4102 target with both of these set in GDB history, and it seems unlikely to be
4103 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 4104
d8dd4d5f 4105 if (ws->kind != TARGET_WAITKIND_STOPPED)
4fa8626c
DJ
4106 return;
4107
d8dd4d5f 4108 if (ws->value.sig != GDB_SIGNAL_TRAP)
4fa8626c
DJ
4109 return;
4110
4058b839
PA
4111 /* In reverse execution, when a breakpoint is hit, the instruction
4112 under it has already been de-executed. The reported PC always
4113 points at the breakpoint address, so adjusting it further would
4114 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4115 architecture:
4116
4117 B1 0x08000000 : INSN1
4118 B2 0x08000001 : INSN2
4119 0x08000002 : INSN3
4120 PC -> 0x08000003 : INSN4
4121
4122 Say you're stopped at 0x08000003 as above. Reverse continuing
4123 from that point should hit B2 as below. Reading the PC when the
4124 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4125 been de-executed already.
4126
4127 B1 0x08000000 : INSN1
4128 B2 PC -> 0x08000001 : INSN2
4129 0x08000002 : INSN3
4130 0x08000003 : INSN4
4131
4132 We can't apply the same logic as for forward execution, because
4133 we would wrongly adjust the PC to 0x08000000, since there's a
4134 breakpoint at PC - 1. We'd then report a hit on B1, although
4135 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4136 behaviour. */
4137 if (execution_direction == EXEC_REVERSE)
4138 return;
4139
1cf4d951
PA
4140 /* If the target can tell whether the thread hit a SW breakpoint,
4141 trust it. Targets that can tell also adjust the PC
4142 themselves. */
4143 if (target_supports_stopped_by_sw_breakpoint ())
4144 return;
4145
4146 /* Note that relying on whether a breakpoint is planted in memory to
4147 determine this can fail. E.g., the breakpoint could have been
4148 removed since. Or the thread could have been told to step an
4149 instruction the size of a breakpoint instruction, and only
4150 _after_ was a breakpoint inserted at its address. */
4151
24a73cce
UW
4152 /* If this target does not decrement the PC after breakpoints, then
4153 we have nothing to do. */
00431a78 4154 regcache = get_thread_regcache (thread);
ac7936df 4155 gdbarch = regcache->arch ();
118e6252 4156
527a273a 4157 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 4158 if (decr_pc == 0)
24a73cce
UW
4159 return;
4160
8b86c959 4161 const address_space *aspace = regcache->aspace ();
6c95b8df 4162
8aad930b
AC
4163 /* Find the location where (if we've hit a breakpoint) the
4164 breakpoint would be. */
118e6252 4165 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
8aad930b 4166
1cf4d951
PA
4167 /* If the target can't tell whether a software breakpoint triggered,
4168 fall back to figuring it out based on breakpoints we think were
4169 inserted in the target, and on whether the thread was stepped or
4170 continued. */
4171
1c5cfe86
PA
4172 /* Check whether there actually is a software breakpoint inserted at
4173 that location.
4174
4175 If in non-stop mode, a race condition is possible where we've
4176 removed a breakpoint, but stop events for that breakpoint were
4177 already queued and arrive later. To suppress those spurious
4178 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
1cf4d951
PA
4179 and retire them after a number of stop events are reported. Note
4180 this is a heuristic and can thus get confused. The real fix is
4181 to get the "stopped by SW BP and needs adjustment" info out of
4182 the target/kernel (and thus never reach here; see above). */
6c95b8df 4183 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
fbea99ea
PA
4184 || (target_is_non_stop_p ()
4185 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 4186 {
07036511 4187 gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;
abbb1732 4188
8213266a 4189 if (record_full_is_used ())
07036511
TT
4190 restore_operation_disable.emplace
4191 (record_full_gdb_operation_disable_set ());
96429cc8 4192
1c0fdd0e
UW
4193 /* When using hardware single-step, a SIGTRAP is reported for both
4194 a completed single-step and a software breakpoint. Need to
4195 differentiate between the two, as the latter needs adjusting
4196 but the former does not.
4197
4198 The SIGTRAP can be due to a completed hardware single-step only if
4199 - we didn't insert software single-step breakpoints
1c0fdd0e
UW
4200 - this thread is currently being stepped
4201
4202 If any of these events did not occur, we must have stopped due
4203 to hitting a software breakpoint, and have to back up to the
4204 breakpoint address.
4205
4206 As a special case, we could have hardware single-stepped a
4207 software breakpoint. In this case (prev_pc == breakpoint_pc),
4208 we also need to back up to the breakpoint address. */
4209
d8dd4d5f
PA
4210 if (thread_has_single_step_breakpoints_set (thread)
4211 || !currently_stepping (thread)
4212 || (thread->stepped_breakpoint
4213 && thread->prev_pc == breakpoint_pc))
515630c5 4214 regcache_write_pc (regcache, breakpoint_pc);
8aad930b 4215 }
4fa8626c
DJ
4216}
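/* Worked example of the forward-execution adjustment above
   (illustrative): with gdbarch_decr_pc_after_break == 1, a software
   breakpoint planted at 0x08000000 reports a SIGTRAP with PC ==
   0x08000001.  breakpoint_pc is then 0x08000001 - 1 == 0x08000000; if
   the stop was not a completed (hardware) single-step, the PC is
   rewound to that address via regcache_write_pc.  */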
4217
c4464ade 4218static bool
edb3359d
DJ
4219stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4220{
4221 for (frame = get_prev_frame (frame);
4222 frame != NULL;
4223 frame = get_prev_frame (frame))
4224 {
4225 if (frame_id_eq (get_frame_id (frame), step_frame_id))
c4464ade
SM
4226 return true;
4227
edb3359d
DJ
4228 if (get_frame_type (frame) != INLINE_FRAME)
4229 break;
4230 }
4231
c4464ade 4232 return false;
edb3359d
DJ
4233}
4234
4a4c04f1
BE
4235/* Look for an inline frame that is marked for skip.
4236 If PREV_FRAME is TRUE start at the previous frame,
4237 otherwise start at the current frame. Stop at the
4238 first non-inline frame, or at the frame where the
4239 step started. */
4240
4241static bool
4242inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4243{
4244 struct frame_info *frame = get_current_frame ();
4245
4246 if (prev_frame)
4247 frame = get_prev_frame (frame);
4248
4249 for (; frame != NULL; frame = get_prev_frame (frame))
4250 {
4251 const char *fn = NULL;
4252 symtab_and_line sal;
4253 struct symbol *sym;
4254
4255 if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
4256 break;
4257 if (get_frame_type (frame) != INLINE_FRAME)
4258 break;
4259
4260 sal = find_frame_sal (frame);
4261 sym = get_frame_function (frame);
4262
4263 if (sym != NULL)
4264 fn = sym->print_name ();
4265
4266 if (sal.line != 0
4267 && function_name_is_marked_for_skip (fn, sal))
4268 return true;
4269 }
4270
4271 return false;
4272}
4273
c65d6b55
PA
4274/* If the event thread has the stop requested flag set, pretend it
4275 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4276 target_stop). */
4277
4278static bool
4279handle_stop_requested (struct execution_control_state *ecs)
4280{
4281 if (ecs->event_thread->stop_requested)
4282 {
4283 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4284 ecs->ws.value.sig = GDB_SIGNAL_0;
4285 handle_signal_stop (ecs);
4286 return true;
4287 }
4288 return false;
4289}
4290
a96d9b2e 4291/* Auxiliary function that handles syscall entry/return events.
c4464ade
SM
4292 It returns true if the inferior should keep going (and GDB
4293 should ignore the event), or false if the event deserves to be
a96d9b2e 4294 processed. */
ca2163eb 4295
c4464ade 4296static bool
ca2163eb 4297handle_syscall_event (struct execution_control_state *ecs)
a96d9b2e 4298{
ca2163eb 4299 struct regcache *regcache;
ca2163eb
PA
4300 int syscall_number;
4301
00431a78 4302 context_switch (ecs);
ca2163eb 4303
00431a78 4304 regcache = get_thread_regcache (ecs->event_thread);
f90263c1 4305 syscall_number = ecs->ws.value.syscall_number;
f2ffa92b 4306 ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);
ca2163eb 4307
a96d9b2e
SDJ
4308 if (catch_syscall_enabled () > 0
4309 && catching_syscall_number (syscall_number) > 0)
4310 {
1eb8556f 4311 infrun_debug_printf ("syscall number=%d", syscall_number);
a96d9b2e 4312
16c381f0 4313 ecs->event_thread->control.stop_bpstat
a01bda52 4314 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
4315 ecs->event_thread->suspend.stop_pc,
4316 ecs->event_thread, &ecs->ws);
ab04a2af 4317
c65d6b55 4318 if (handle_stop_requested (ecs))
c4464ade 4319 return false;
c65d6b55 4320
ce12b012 4321 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
ca2163eb
PA
4322 {
4323 /* Catchpoint hit. */
c4464ade 4324 return false;
ca2163eb 4325 }
a96d9b2e 4326 }
ca2163eb 4327
c65d6b55 4328 if (handle_stop_requested (ecs))
c4464ade 4329 return false;
c65d6b55 4330
ca2163eb 4331 /* If no catchpoint triggered for this, then keep going. */
ca2163eb 4332 keep_going (ecs);
c4464ade
SM
4333
4334 return true;
a96d9b2e
SDJ
4335}
4336
7e324e48
GB
4337/* Lazily fill in the execution_control_state's stop_func_* fields. */
4338
4339static void
4340fill_in_stop_func (struct gdbarch *gdbarch,
4341 struct execution_control_state *ecs)
4342{
4343 if (!ecs->stop_func_filled_in)
4344 {
98a617f8 4345 const block *block;
fe830662 4346 const general_symbol_info *gsi;
98a617f8 4347
7e324e48
GB
4348 /* Don't care about return value; stop_func_start and stop_func_name
4349 will both be 0 if it doesn't work. */
fe830662
TT
4350 find_pc_partial_function_sym (ecs->event_thread->suspend.stop_pc,
4351 &gsi,
4352 &ecs->stop_func_start,
4353 &ecs->stop_func_end,
4354 &block);
4355 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
98a617f8
KB
4356
4357 /* The call to find_pc_partial_function, above, will set
4358 stop_func_start and stop_func_end to the start and end
4359 of the range containing the stop pc. If this range
4360 contains the entry pc for the block (which is always the
4361 case for contiguous blocks), advance stop_func_start past
4362 the function's start offset and entrypoint. Note that
4363 stop_func_start is NOT advanced when in a range of a
4364 non-contiguous block that does not contain the entry pc. */
4365 if (block != nullptr
4366 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4367 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4368 {
4369 ecs->stop_func_start
4370 += gdbarch_deprecated_function_start_offset (gdbarch);
4371
4372 if (gdbarch_skip_entrypoint_p (gdbarch))
4373 ecs->stop_func_start
4374 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4375 }
591a12a1 4376
7e324e48
GB
4377 ecs->stop_func_filled_in = 1;
4378 }
4379}
4380
4f5d7f63 4381
00431a78 4382/* Return the STOP_SOON field of the inferior pointed at by ECS. */
4f5d7f63
PA
4383
4384static enum stop_kind
00431a78 4385get_inferior_stop_soon (execution_control_state *ecs)
4f5d7f63 4386{
5b6d1e4f 4387 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4f5d7f63
PA
4388
4389 gdb_assert (inf != NULL);
4390 return inf->control.stop_soon;
4391}
4392
5b6d1e4f
PA
4393/* Poll for one event out of the current target. Store the resulting
4394 waitstatus in WS, and return the event ptid. Does not block. */
372316f1
PA
4395
4396static ptid_t
5b6d1e4f 4397poll_one_curr_target (struct target_waitstatus *ws)
372316f1
PA
4398{
4399 ptid_t event_ptid;
372316f1
PA
4400
4401 overlay_cache_invalid = 1;
4402
4403 /* Flush target cache before starting to handle each event.
4404 Target was running and cache could be stale. This is just a
4405 heuristic. Running threads may modify target memory, but we
4406 don't get any event. */
4407 target_dcache_invalidate ();
4408
4409 if (deprecated_target_wait_hook)
5b6d1e4f 4410 event_ptid = deprecated_target_wait_hook (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1 4411 else
5b6d1e4f 4412 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1
PA
4413
4414 if (debug_infrun)
5b6d1e4f 4415 print_target_wait_results (minus_one_ptid, event_ptid, ws);
372316f1
PA
4416
4417 return event_ptid;
4418}
4419
5b6d1e4f
PA
4420/* An event reported by wait_one. */
4421
4422struct wait_one_event
4423{
4424 /* The target the event came out of. */
4425 process_stratum_target *target;
4426
4427 /* The PTID the event was for. */
4428 ptid_t ptid;
4429
4430 /* The waitstatus. */
4431 target_waitstatus ws;
4432};
4433
4434/* Wait for one event out of any target. */
4435
4436static wait_one_event
4437wait_one ()
4438{
4439 while (1)
4440 {
4441 for (inferior *inf : all_inferiors ())
4442 {
4443 process_stratum_target *target = inf->process_target ();
4444 if (target == NULL
4445 || !target->is_async_p ()
4446 || !target->threads_executing)
4447 continue;
4448
4449 switch_to_inferior_no_thread (inf);
4450
4451 wait_one_event event;
4452 event.target = target;
4453 event.ptid = poll_one_curr_target (&event.ws);
4454
4455 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
4456 {
4457 /* If nothing is resumed, remove the target from the
4458 event loop. */
4459 target_async (0);
4460 }
4461 else if (event.ws.kind != TARGET_WAITKIND_IGNORE)
4462 return event;
4463 }
4464
4465 /* Block waiting for some event. */
4466
4467 fd_set readfds;
4468 int nfds = 0;
4469
4470 FD_ZERO (&readfds);
4471
4472 for (inferior *inf : all_inferiors ())
4473 {
4474 process_stratum_target *target = inf->process_target ();
4475 if (target == NULL
4476 || !target->is_async_p ()
4477 || !target->threads_executing)
4478 continue;
4479
4480 int fd = target->async_wait_fd ();
4481 FD_SET (fd, &readfds);
4482 if (nfds <= fd)
4483 nfds = fd + 1;
4484 }
4485
4486 if (nfds == 0)
4487 {
4488 /* No waitable targets left. All must be stopped. */
4489 return {NULL, minus_one_ptid, {TARGET_WAITKIND_NO_RESUMED}};
4490 }
4491
4492 QUIT;
4493
4494 int numfds = interruptible_select (nfds, &readfds, 0, NULL, 0);
4495 if (numfds < 0)
4496 {
4497 if (errno == EINTR)
4498 continue;
4499 else
4500 perror_with_name ("interruptible_select");
4501 }
4502 }
4503}
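/* A minimal usage sketch for the interface above, assuming a caller
   shaped like stop_all_threads below (the real consumer in this file):

     wait_one_event event = wait_one ();
     if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
       {
         ... all resumed threads have exited ...
       }
     else
       {
         ... event.target / event.ptid identify the eventing thread ...
       }
*/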
4504
372316f1
PA
4505/* Save the thread's event and stop reason to process it later. */
4506
4507static void
5b6d1e4f 4508save_waitstatus (struct thread_info *tp, const target_waitstatus *ws)
372316f1 4509{
1eb8556f
SM
4510 infrun_debug_printf ("saving status %s for %d.%ld.%ld",
4511 target_waitstatus_to_string (ws).c_str (),
4512 tp->ptid.pid (),
4513 tp->ptid.lwp (),
4514 tp->ptid.tid ());
372316f1
PA
4515
4516 /* Record for later. */
4517 tp->suspend.waitstatus = *ws;
4518 tp->suspend.waitstatus_pending_p = 1;
4519
00431a78 4520 struct regcache *regcache = get_thread_regcache (tp);
8b86c959 4521 const address_space *aspace = regcache->aspace ();
372316f1
PA
4522
4523 if (ws->kind == TARGET_WAITKIND_STOPPED
4524 && ws->value.sig == GDB_SIGNAL_TRAP)
4525 {
4526 CORE_ADDR pc = regcache_read_pc (regcache);
4527
4528 adjust_pc_after_break (tp, &tp->suspend.waitstatus);
4529
18493a00
PA
4530 scoped_restore_current_thread restore_thread;
4531 switch_to_thread (tp);
4532
4533 if (target_stopped_by_watchpoint ())
372316f1
PA
4534 {
4535 tp->suspend.stop_reason
4536 = TARGET_STOPPED_BY_WATCHPOINT;
4537 }
4538 else if (target_supports_stopped_by_sw_breakpoint ()
18493a00 4539 && target_stopped_by_sw_breakpoint ())
372316f1
PA
4540 {
4541 tp->suspend.stop_reason
4542 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4543 }
4544 else if (target_supports_stopped_by_hw_breakpoint ()
18493a00 4545 && target_stopped_by_hw_breakpoint ())
372316f1
PA
4546 {
4547 tp->suspend.stop_reason
4548 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4549 }
4550 else if (!target_supports_stopped_by_hw_breakpoint ()
4551 && hardware_breakpoint_inserted_here_p (aspace,
4552 pc))
4553 {
4554 tp->suspend.stop_reason
4555 = TARGET_STOPPED_BY_HW_BREAKPOINT;
4556 }
4557 else if (!target_supports_stopped_by_sw_breakpoint ()
4558 && software_breakpoint_inserted_here_p (aspace,
4559 pc))
4560 {
4561 tp->suspend.stop_reason
4562 = TARGET_STOPPED_BY_SW_BREAKPOINT;
4563 }
4564 else if (!thread_has_single_step_breakpoints_set (tp)
4565 && currently_stepping (tp))
4566 {
4567 tp->suspend.stop_reason
4568 = TARGET_STOPPED_BY_SINGLE_STEP;
4569 }
4570 }
4571}
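/* Note on the above: both the waitstatus and the decoded stop reason
   are latched into TP->suspend so the event can be processed later
   (callers check waitstatus_pending_p); the stop reason is decoded
   now, while TP is still the thread the target just reported for,
   rather than at the time the pending event is finally handled.  */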
4572
293b3ebc
TBA
4573/* Mark the non-executing threads accordingly. In all-stop, all
4574 threads of all processes are stopped when we get any event
4575 reported. In non-stop mode, only the event thread stops. */
4576
4577static void
4578mark_non_executing_threads (process_stratum_target *target,
4579 ptid_t event_ptid,
4580 struct target_waitstatus ws)
4581{
4582 ptid_t mark_ptid;
4583
4584 if (!target_is_non_stop_p ())
4585 mark_ptid = minus_one_ptid;
4586 else if (ws.kind == TARGET_WAITKIND_SIGNALLED
4587 || ws.kind == TARGET_WAITKIND_EXITED)
4588 {
4589 /* If we're handling a process exit in non-stop mode, even
4590 though threads haven't been deleted yet, one would think
4591 that there is nothing to do, as threads of the dead process
4592 will be soon deleted, and threads of any other process were
4593 left running. However, on some targets, threads survive a
4594 process exit event. E.g., for the "checkpoint" command,
4595 when the current checkpoint/fork exits, linux-fork.c
4596 automatically switches to another fork from within
4597 target_mourn_inferior, by associating the same
4598 inferior/thread to another fork. We haven't mourned yet at
4599 this point, but we must mark any threads left in the
4600 process as not-executing so that finish_thread_state marks
4601 them stopped (from the user's perspective) if/when we present
4602 the stop to the user. */
4603 mark_ptid = ptid_t (event_ptid.pid ());
4604 }
4605 else
4606 mark_ptid = event_ptid;
4607
4608 set_executing (target, mark_ptid, false);
4609
4610 /* Likewise the resumed flag. */
4611 set_resumed (target, mark_ptid, false);
4612}
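/* Side note on the ptid filters used above: minus_one_ptid matches
   every thread of every process known to TARGET, ptid_t (pid) matches
   all threads of that single process, and a fully qualified ptid
   matches exactly one thread.  */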
4613
6efcd9a8 4614/* See infrun.h. */
372316f1 4615
6efcd9a8 4616void
372316f1
PA
4617stop_all_threads (void)
4618{
4619 /* We may need multiple passes to discover all threads. */
4620 int pass;
4621 int iterations = 0;
372316f1 4622
53cccef1 4623 gdb_assert (exists_non_stop_target ());
372316f1 4624
1eb8556f 4625 infrun_debug_printf ("starting");
372316f1 4626
00431a78 4627 scoped_restore_current_thread restore_thread;
372316f1 4628
6ad82919
TBA
4629 /* Enable thread events of all targets. */
4630 for (auto *target : all_non_exited_process_targets ())
4631 {
4632 switch_to_target_no_thread (target);
4633 target_thread_events (true);
4634 }
4635
4636 SCOPE_EXIT
4637 {
4638 /* Disable thread events of all targets. */
4639 for (auto *target : all_non_exited_process_targets ())
4640 {
4641 switch_to_target_no_thread (target);
4642 target_thread_events (false);
4643 }
4644
17417fb0 4645 /* Use debug_prefixed_printf directly to get a meaningful function
dda83cd7 4646 name. */
6ad82919 4647 if (debug_infrun)
17417fb0 4648 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
6ad82919 4649 };
65706a29 4650
372316f1
PA
4651 /* Request threads to stop, and then wait for the stops. Because
4652 threads we already know about can spawn more threads while we're
4653 trying to stop them, and we only learn about new threads when we
4654 update the thread list, do this in a loop, and keep iterating
4655 until two passes find no threads that need to be stopped. */
4656 for (pass = 0; pass < 2; pass++, iterations++)
4657 {
1eb8556f 4658 infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
372316f1
PA
4659 while (1)
4660 {
29d6859f 4661 int waits_needed = 0;
372316f1 4662
a05575d3
TBA
4663 for (auto *target : all_non_exited_process_targets ())
4664 {
4665 switch_to_target_no_thread (target);
4666 update_thread_list ();
4667 }
372316f1
PA
4668
4669 /* Go through all threads looking for threads that we need
4670 to tell the target to stop. */
08036331 4671 for (thread_info *t : all_non_exited_threads ())
372316f1 4672 {
53cccef1
TBA
4673 /* For a single-target setting with an all-stop target,
4674 we would not even arrive here. For a multi-target
4675 setting, until GDB is able to handle a mixture of
4676 all-stop and non-stop targets, simply skip all-stop
4677 targets' threads. This should be fine due to the
4678 protection of 'check_multi_target_resumption'. */
4679
4680 switch_to_thread_no_regs (t);
4681 if (!target_is_non_stop_p ())
4682 continue;
4683
372316f1
PA
4684 if (t->executing)
4685 {
4686 /* If already stopping, don't request a stop again.
4687 We just haven't seen the notification yet. */
4688 if (!t->stop_requested)
4689 {
1eb8556f
SM
4690 infrun_debug_printf (" %s executing, need stop",
4691 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4692 target_stop (t->ptid);
4693 t->stop_requested = 1;
4694 }
4695 else
4696 {
1eb8556f
SM
4697 infrun_debug_printf (" %s executing, already stopping",
4698 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4699 }
4700
4701 if (t->stop_requested)
29d6859f 4702 waits_needed++;
372316f1
PA
4703 }
4704 else
4705 {
1eb8556f
SM
4706 infrun_debug_printf (" %s not executing",
4707 target_pid_to_str (t->ptid).c_str ());
372316f1
PA
4708
4709 /* The thread may be not executing, but still be
4710 resumed with a pending status to process. */
719546c4 4711 t->resumed = false;
372316f1
PA
4712 }
4713 }
4714
29d6859f 4715 if (waits_needed == 0)
372316f1
PA
4716 break;
4717
4718 /* If we find new threads on the second iteration, restart
4719 over. We want to see two iterations in a row with all
4720 threads stopped. */
4721 if (pass > 0)
4722 pass = -1;
4723
29d6859f 4724 for (int i = 0; i < waits_needed; i++)
c29705b7 4725 {
29d6859f 4726 wait_one_event event = wait_one ();
a05575d3 4727
1eb8556f
SM
4728 infrun_debug_printf
4729 ("%s %s", target_waitstatus_to_string (&event.ws).c_str (),
4730 target_pid_to_str (event.ptid).c_str ());
a05575d3 4731
29d6859f 4732 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
a05575d3 4733 {
29d6859f
LM
4734 /* All resumed threads exited. */
4735 break;
a05575d3 4736 }
29d6859f
LM
4737 else if (event.ws.kind == TARGET_WAITKIND_THREAD_EXITED
4738 || event.ws.kind == TARGET_WAITKIND_EXITED
4739 || event.ws.kind == TARGET_WAITKIND_SIGNALLED)
6efcd9a8 4740 {
29d6859f 4741 /* One thread/process exited/signalled. */
6efcd9a8 4742
29d6859f 4743 thread_info *t = nullptr;
372316f1 4744
29d6859f
LM
4745 /* The target may have reported just a pid. If so, try
4746 the first non-exited thread. */
4747 if (event.ptid.is_pid ())
372316f1 4748 {
29d6859f
LM
4749 int pid = event.ptid.pid ();
4750 inferior *inf = find_inferior_pid (event.target, pid);
4751 for (thread_info *tp : inf->non_exited_threads ())
372316f1 4752 {
29d6859f
LM
4753 t = tp;
4754 break;
372316f1 4755 }
29d6859f
LM
4756
4757 /* If there is no available thread, the event would
4758 have to be appended to a per-inferior event list,
4759 which does not exist (and if it did, we'd have
4760 to adjust the run control commands to be able to
4761 resume such an inferior). We assert here instead
4762 of going into an infinite loop. */
4763 gdb_assert (t != nullptr);
4764
1eb8556f
SM
4765 infrun_debug_printf
4766 ("using %s", target_pid_to_str (t->ptid).c_str ());
29d6859f
LM
4767 }
4768 else
4769 {
4770 t = find_thread_ptid (event.target, event.ptid);
4771 /* Check if this is the first time we see this thread.
4772 Don't bother adding if it individually exited. */
4773 if (t == nullptr
4774 && event.ws.kind != TARGET_WAITKIND_THREAD_EXITED)
4775 t = add_thread (event.target, event.ptid);
4776 }
4777
4778 if (t != nullptr)
4779 {
4780 /* Set the threads as non-executing to avoid
4781 another stop attempt on them. */
4782 switch_to_thread_no_regs (t);
4783 mark_non_executing_threads (event.target, event.ptid,
4784 event.ws);
4785 save_waitstatus (t, &event.ws);
4786 t->stop_requested = false;
372316f1
PA
4787 }
4788 }
4789 else
4790 {
29d6859f
LM
4791 thread_info *t = find_thread_ptid (event.target, event.ptid);
4792 if (t == NULL)
4793 t = add_thread (event.target, event.ptid);
372316f1 4794
29d6859f
LM
4795 t->stop_requested = 0;
4796 t->executing = 0;
4797 t->resumed = false;
4798 t->control.may_range_step = 0;
4799
4800 /* This may be the first time we see the inferior report
4801 a stop. */
4802 inferior *inf = find_inferior_ptid (event.target, event.ptid);
4803 if (inf->needs_setup)
372316f1 4804 {
29d6859f
LM
4805 switch_to_thread_no_regs (t);
4806 setup_inferior (0);
372316f1
PA
4807 }
4808
29d6859f
LM
4809 if (event.ws.kind == TARGET_WAITKIND_STOPPED
4810 && event.ws.value.sig == GDB_SIGNAL_0)
372316f1 4811 {
29d6859f
LM
4812 /* We caught the event that we intended to catch, so
4813 there's no event pending. */
4814 t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
4815 t->suspend.waitstatus_pending_p = 0;
4816
4817 if (displaced_step_fixup (t, GDB_SIGNAL_0) < 0)
4818 {
4819 /* Add it back to the step-over queue. */
1eb8556f
SM
4820 infrun_debug_printf
4821 ("displaced-step of %s canceled: adding back to "
4822 "the step-over queue",
4823 target_pid_to_str (t->ptid).c_str ());
4824
29d6859f
LM
4825 t->control.trap_expected = 0;
4826 thread_step_over_chain_enqueue (t);
4827 }
372316f1 4828 }
29d6859f
LM
4829 else
4830 {
4831 enum gdb_signal sig;
4832 struct regcache *regcache;
372316f1 4833
1eb8556f
SM
4834 infrun_debug_printf
4835 ("target_wait %s, saving status for %d.%ld.%ld",
4836 target_waitstatus_to_string (&event.ws).c_str (),
4837 t->ptid.pid (), t->ptid.lwp (), t->ptid.tid ());
29d6859f
LM
4838
4839 /* Record for later. */
4840 save_waitstatus (t, &event.ws);
4841
4842 sig = (event.ws.kind == TARGET_WAITKIND_STOPPED
4843 ? event.ws.value.sig : GDB_SIGNAL_0);
4844
4845 if (displaced_step_fixup (t, sig) < 0)
4846 {
4847 /* Add it back to the step-over queue. */
4848 t->control.trap_expected = 0;
4849 thread_step_over_chain_enqueue (t);
4850 }
4851
4852 regcache = get_thread_regcache (t);
4853 t->suspend.stop_pc = regcache_read_pc (regcache);
4854
1eb8556f
SM
4855 infrun_debug_printf ("saved stop_pc=%s for %s "
4856 "(currently_stepping=%d)",
4857 paddress (target_gdbarch (),
4858 t->suspend.stop_pc),
4859 target_pid_to_str (t->ptid).c_str (),
4860 currently_stepping (t));
372316f1
PA
4861 }
4862 }
4863 }
4864 }
4865 }
372316f1
PA
4866}
4867
f4836ba9
PA
4868/* Handle a TARGET_WAITKIND_NO_RESUMED event. */
4869
c4464ade 4870static bool
f4836ba9
PA
4871handle_no_resumed (struct execution_control_state *ecs)
4872{
3b12939d 4873 if (target_can_async_p ())
f4836ba9 4874 {
c4464ade 4875 bool any_sync = false;
f4836ba9 4876
2dab0c7b 4877 for (ui *ui : all_uis ())
3b12939d
PA
4878 {
4879 if (ui->prompt_state == PROMPT_BLOCKED)
4880 {
c4464ade 4881 any_sync = true;
3b12939d
PA
4882 break;
4883 }
4884 }
4885 if (!any_sync)
4886 {
4887 /* There were no unwaited-for children left in the target, but
4888 we're not synchronously waiting for events either. Just
4889 ignore. */
4890
1eb8556f 4891 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
3b12939d 4892 prepare_to_wait (ecs);
c4464ade 4893 return true;
3b12939d 4894 }
f4836ba9
PA
4895 }
4896
4897 /* Otherwise, if we were running a synchronous execution command, we
4898 may need to cancel it and give the user back the terminal.
4899
4900 In non-stop mode, the target can't tell whether we've already
4901 consumed previous stop events, so it can end up sending us a
4902 no-resumed event like so:
4903
4904 #0 - thread 1 is left stopped
4905
4906 #1 - thread 2 is resumed and hits breakpoint
dda83cd7 4907 -> TARGET_WAITKIND_STOPPED
f4836ba9
PA
4908
4909 #2 - thread 3 is resumed and exits
dda83cd7 4910 this is the last resumed thread, so
f4836ba9
PA
4911 -> TARGET_WAITKIND_NO_RESUMED
4912
4913 #3 - gdb processes stop for thread 2 and decides to re-resume
dda83cd7 4914 it.
f4836ba9
PA
4915
4916 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
dda83cd7 4917 thread 2 is now resumed, so the event should be ignored.
f4836ba9
PA
4918
4919 IOW, if the stop for thread 2 doesn't end a foreground command,
4920 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
4921 event. But it could be that the event meant that thread 2 itself
4922 (or whatever other thread was the last resumed thread) exited.
4923
4924 To address this we refresh the thread list and check whether we
4925 have resumed threads _now_. In the example above, this removes
4926 thread 3 from the thread list. If thread 2 was re-resumed, we
4927 ignore this event. If we find no thread resumed, then we cancel
7d3badc6
PA
4928 the synchronous command and show "No unwaited-for children left." to the
4929 user. */
f4836ba9 4930
d6cc5d98 4931 inferior *curr_inf = current_inferior ();
7d3badc6 4932
d6cc5d98
PA
4933 scoped_restore_current_thread restore_thread;
4934
4935 for (auto *target : all_non_exited_process_targets ())
4936 {
4937 switch_to_target_no_thread (target);
4938 update_thread_list ();
4939 }
4940
4941 /* If:
4942
4943 - the current target has no thread executing, and
4944 - the current inferior is native, and
4945 - the current inferior is the one which has the terminal, and
4946 - we did nothing,
4947
4948 then a Ctrl-C from this point on would remain stuck in the
4949 kernel, until a thread resumes and dequeues it. That would
4950 result in the GDB CLI not reacting to Ctrl-C, not able to
4951 interrupt the program. To address this, if the current inferior
4952 no longer has any thread executing, we give the terminal to some
4953 other inferior that has at least one thread executing. */
4954 bool swap_terminal = true;
4955
4956 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
4957 whether to report it to the user. */
4958 bool ignore_event = false;
7d3badc6
PA
4959
4960 for (thread_info *thread : all_non_exited_threads ())
f4836ba9 4961 {
d6cc5d98
PA
4962 if (swap_terminal && thread->executing)
4963 {
4964 if (thread->inf != curr_inf)
4965 {
4966 target_terminal::ours ();
4967
4968 switch_to_thread (thread);
4969 target_terminal::inferior ();
4970 }
4971 swap_terminal = false;
4972 }
4973
4974 if (!ignore_event
4975 && (thread->executing
4976 || thread->suspend.waitstatus_pending_p))
f4836ba9 4977 {
7d3badc6
PA
4978 /* Either there were no unwaited-for children left in the
4979 target at some point, but there are now, or some target
4980 other than the eventing one has unwaited-for children
4981 left. Just ignore. */
1eb8556f
SM
4982 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
4983 "(ignoring: found resumed)");
d6cc5d98
PA
4984
4985 ignore_event = true;
f4836ba9 4986 }
d6cc5d98
PA
4987
4988 if (ignore_event && !swap_terminal)
4989 break;
4990 }
4991
4992 if (ignore_event)
4993 {
4994 switch_to_inferior_no_thread (curr_inf);
4995 prepare_to_wait (ecs);
c4464ade 4996 return true;
f4836ba9
PA
4997 }
4998
4999 /* Go ahead and report the event. */
c4464ade 5000 return false;
f4836ba9
PA
5001}
5002
05ba8510
PA
5003/* Given an execution control state that has been freshly filled in by
5004 an event from the inferior, figure out what it means and take
5005 appropriate action.
5006
5007 The alternatives are:
5008
22bcd14b 5009 1) stop_waiting and return; to really stop and return to the
05ba8510
PA
5010 debugger.
5011
5012 2) keep_going and return; to wait for the next event (set
5013 ecs->event_thread->stepping_over_breakpoint to 1 to single step
5014 once). */
c906108c 5015
ec9499be 5016static void
595915c1 5017handle_inferior_event (struct execution_control_state *ecs)
cd0fc7c3 5018{
595915c1
TT
5019 /* Make sure that all temporary struct value objects that were
5020 created during the handling of the event get deleted at the
5021 end. */
5022 scoped_value_mark free_values;
5023
d6b48e9c
PA
5024 enum stop_kind stop_soon;
5025
1eb8556f 5026 infrun_debug_printf ("%s", target_waitstatus_to_string (&ecs->ws).c_str ());
c29705b7 5027
28736962
PA
5028 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
5029 {
5030 /* We had an event in the inferior, but we are not interested in
5031 handling it at this level. The lower layers have already
5032 done what needs to be done, if anything.
5033
5034 One of the possible circumstances for this is when the
5035 inferior produces output for the console. The inferior has
5036 not stopped, and we are ignoring the event. Another possible
5037 circumstance is any event which the lower level knows will be
5038 reported multiple times without an intervening resume. */
28736962
PA
5039 prepare_to_wait (ecs);
5040 return;
5041 }
5042
65706a29
PA
5043 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
5044 {
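/* An individual thread exited. There is nothing more to do with
   the event at this level, so just keep waiting for the next one. */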
65706a29
PA
5045 prepare_to_wait (ecs);
5046 return;
5047 }
5048
0e5bf2a8 5049 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
f4836ba9
PA
5050 && handle_no_resumed (ecs))
5051 return;
0e5bf2a8 5052
5b6d1e4f
PA
5053 /* Cache the last target/ptid/waitstatus. */
5054 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
e02bc4cc 5055
ca005067 5056 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 5057 stop_stack_dummy = STOP_NONE;
ca005067 5058
0e5bf2a8
PA
5059 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
5060 {
5061 /* No unwaited-for children left. IOW, all resumed children
5062 have exited. */
c4464ade 5063 stop_print_frame = false;
22bcd14b 5064 stop_waiting (ecs);
0e5bf2a8
PA
5065 return;
5066 }
5067
8c90c137 5068 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
64776a0b 5069 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
359f5fe6 5070 {
5b6d1e4f 5071 ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
359f5fe6
PA
5072 /* If it's a new thread, add it to the thread database. */
5073 if (ecs->event_thread == NULL)
5b6d1e4f 5074 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
c1e36e3e
PA
5075
5076 /* Disable range stepping. If the next step request could use a
5077 range, this will be end up re-enabled then. */
5078 ecs->event_thread->control.may_range_step = 0;
359f5fe6 5079 }
88ed393a
JK
5080
5081 /* Dependent on valid ECS->EVENT_THREAD. */
d8dd4d5f 5082 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
88ed393a
JK
5083
5084 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5085 reinit_frame_cache ();
5086
28736962
PA
5087 breakpoint_retire_moribund ();
5088
2b009048
DJ
5089 /* First, distinguish signals caused by the debugger from signals
5090 that have to do with the program's own actions. Note that
5091 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5092 on the operating system version. Here we detect when a SIGILL or
5093 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5094 something similar for SIGSEGV, since a SIGSEGV will be generated
5095 when we're trying to execute a breakpoint instruction on a
5096 non-executable stack. This happens for call dummy breakpoints
5097 for architectures like SPARC that place call dummies on the
5098 stack. */
2b009048 5099 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
a493e3e2
PA
5100 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
5101 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
5102 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
2b009048 5103 {
00431a78 5104 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
de0a0249 5105
a01bda52 5106 if (breakpoint_inserted_here_p (regcache->aspace (),
de0a0249
UW
5107 regcache_read_pc (regcache)))
5108 {
1eb8556f 5109 infrun_debug_printf ("Treating signal as SIGTRAP");
a493e3e2 5110 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
de0a0249 5111 }
2b009048
DJ
5112 }
5113
293b3ebc 5114 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
8c90c137 5115
488f131b
JB
5116 switch (ecs->ws.kind)
5117 {
5118 case TARGET_WAITKIND_LOADED:
00431a78 5119 context_switch (ecs);
b0f4b84b 5120 /* Ignore gracefully during startup of the inferior, as it might
dda83cd7
SM
5121 be the shell which has just loaded some objects, otherwise
5122 add the symbols for the newly loaded objects. Also ignore at
5123 the beginning of an attach or remote session; we will query
5124 the full list of libraries once the connection is
5125 established. */
4f5d7f63 5126
00431a78 5127 stop_soon = get_inferior_stop_soon (ecs);
c0236d92 5128 if (stop_soon == NO_STOP_QUIETLY)
488f131b 5129 {
edcc5120
TT
5130 struct regcache *regcache;
5131
00431a78 5132 regcache = get_thread_regcache (ecs->event_thread);
edcc5120
TT
5133
5134 handle_solib_event ();
5135
5136 ecs->event_thread->control.stop_bpstat
a01bda52 5137 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
5138 ecs->event_thread->suspend.stop_pc,
5139 ecs->event_thread, &ecs->ws);
ab04a2af 5140
c65d6b55
PA
5141 if (handle_stop_requested (ecs))
5142 return;
5143
ce12b012 5144 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
edcc5120
TT
5145 {
5146 /* A catchpoint triggered. */
94c57d6a
PA
5147 process_event_stop_test (ecs);
5148 return;
edcc5120 5149 }
488f131b 5150
b0f4b84b
DJ
5151 /* If requested, stop when the dynamic linker notifies
5152 gdb of events. This allows the user to get control
5153 and place breakpoints in initializer routines for
5154 dynamically loaded objects (among other things). */
a493e3e2 5155 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
b0f4b84b
DJ
5156 if (stop_on_solib_events)
5157 {
55409f9d
DJ
5158 /* Make sure we print "Stopped due to solib-event" in
5159 normal_stop. */
c4464ade 5160 stop_print_frame = true;
55409f9d 5161
22bcd14b 5162 stop_waiting (ecs);
b0f4b84b
DJ
5163 return;
5164 }
488f131b 5165 }
b0f4b84b
DJ
5166
5167 /* If we are skipping through a shell, or through shared library
5168 loading that we aren't interested in, resume the program. If
5c09a2c5 5169 we're running the program normally, also resume. */
b0f4b84b
DJ
5170 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
5171 {
74960c60
VP
5172 /* Loading of shared libraries might have changed breakpoint
5173 addresses. Make sure new breakpoints are inserted. */
a25a5a45 5174 if (stop_soon == NO_STOP_QUIETLY)
74960c60 5175 insert_breakpoints ();
64ce06e4 5176 resume (GDB_SIGNAL_0);
b0f4b84b
DJ
5177 prepare_to_wait (ecs);
5178 return;
5179 }
5180
5c09a2c5
PA
5181 /* But stop if we're attaching or setting up a remote
5182 connection. */
5183 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5184 || stop_soon == STOP_QUIETLY_REMOTE)
5185 {
1eb8556f 5186 infrun_debug_printf ("quietly stopped");
22bcd14b 5187 stop_waiting (ecs);
5c09a2c5
PA
5188 return;
5189 }
5190
5191 internal_error (__FILE__, __LINE__,
5192 _("unhandled stop_soon: %d"), (int) stop_soon);
c5aa993b 5193
488f131b 5194 case TARGET_WAITKIND_SPURIOUS:
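/* A spurious stop: unless an explicit stop was requested, just
   re-resume the thread with no signal and keep waiting. */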
c65d6b55
PA
5195 if (handle_stop_requested (ecs))
5196 return;
00431a78 5197 context_switch (ecs);
64ce06e4 5198 resume (GDB_SIGNAL_0);
488f131b
JB
5199 prepare_to_wait (ecs);
5200 return;
c5aa993b 5201
65706a29 5202 case TARGET_WAITKIND_THREAD_CREATED:
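/* A new thread was reported. If some other thread was in the middle
   of a step, switch back to it; otherwise resume the event thread
   and keep waiting. */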
c65d6b55
PA
5203 if (handle_stop_requested (ecs))
5204 return;
00431a78 5205 context_switch (ecs);
65706a29
PA
5206 if (!switch_back_to_stepped_thread (ecs))
5207 keep_going (ecs);
5208 return;
5209
488f131b 5210 case TARGET_WAITKIND_EXITED:
940c3c06 5211 case TARGET_WAITKIND_SIGNALLED:
18493a00
PA
5212 {
5213 /* Depending on the system, ecs->ptid may point to a thread or
5214 to a process. On some targets, target_mourn_inferior may
5215 need to have access to the just-exited thread. That is the
5216 case of GNU/Linux's "checkpoint" support, for example.
5217 Call the switch_to_xxx routine as appropriate. */
5218 thread_info *thr = find_thread_ptid (ecs->target, ecs->ptid);
5219 if (thr != nullptr)
5220 switch_to_thread (thr);
5221 else
5222 {
5223 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5224 switch_to_inferior_no_thread (inf);
5225 }
5226 }
6c95b8df 5227 handle_vfork_child_exec_or_exit (0);
223ffa71 5228 target_terminal::ours (); /* Must do this before mourn anyway. */
488f131b 5229
0c557179
SDJ
5230 /* Clear any previous state of convenience variables. */
5231 clear_exit_convenience_vars ();
5232
940c3c06
PA
5233 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
5234 {
5235 /* Record the exit code in the convenience variable $_exitcode, so
5236 that the user can inspect this again later. */
5237 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5238 (LONGEST) ecs->ws.value.integer);
5239
5240 /* Also record this in the inferior itself. */
5241 current_inferior ()->has_exit_code = 1;
5242 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
8cf64490 5243
98eb56a4
PA
5244 /* Support the --return-child-result option. */
5245 return_child_result_value = ecs->ws.value.integer;
5246
76727919 5247 gdb::observers::exited.notify (ecs->ws.value.integer);
940c3c06
PA
5248 }
5249 else
0c557179 5250 {
00431a78 5251 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
0c557179
SDJ
5252
5253 if (gdbarch_gdb_signal_to_target_p (gdbarch))
5254 {
5255 /* Set the value of the internal variable $_exitsignal,
5256 which holds the signal uncaught by the inferior. */
5257 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5258 gdbarch_gdb_signal_to_target (gdbarch,
5259 ecs->ws.value.sig));
5260 }
5261 else
5262 {
5263 /* We don't have access to the target's method used for
5264 converting between signal numbers (GDB's internal
5265 representation <-> target's representation).
5266 Therefore, we cannot do a good job at displaying this
5267 information to the user. It's better to just warn
5268 her about it (if infrun debugging is enabled), and
5269 give up. */
1eb8556f
SM
5270 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
5271 "signal number.");
0c557179
SDJ
5272 }
5273
76727919 5274 gdb::observers::signal_exited.notify (ecs->ws.value.sig);
0c557179 5275 }
8cf64490 5276
488f131b 5277 gdb_flush (gdb_stdout);
bc1e6c81 5278 target_mourn_inferior (inferior_ptid);
c4464ade 5279 stop_print_frame = false;
22bcd14b 5280 stop_waiting (ecs);
488f131b 5281 return;
c5aa993b 5282
488f131b 5283 case TARGET_WAITKIND_FORKED:
deb3b17b 5284 case TARGET_WAITKIND_VFORKED:
e2d96639
YQ
5285 /* Check whether the inferior is displaced stepping. */
5286 {
00431a78 5287 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
ac7936df 5288 struct gdbarch *gdbarch = regcache->arch ();
c0aba012 5289 inferior *parent_inf = find_inferior_ptid (ecs->target, ecs->ptid);
e2d96639 5290
c0aba012
SM
5291 /* If this is a fork (child gets its own address space copy) and the
5292 displaced step buffer was in use at the time of the fork, restore
5293 displaced step buffer bytes in the child process. */
5294 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
5295 {
5296 displaced_step_inferior_state *displaced
f5f01699 5297 = &parent_inf->displaced_step_state;
c0aba012
SM
5298
5299 if (displaced->step_thread != nullptr)
5300 displaced_step_restore (displaced, ecs->ws.value.related_pid);
5301 }
5302
5303 /* If displaced stepping is supported, and thread ecs->ptid is
5304 displaced stepping. */
00431a78 5305 if (displaced_step_in_progress_thread (ecs->event_thread))
e2d96639 5306 {
e2d96639
YQ
5307 struct regcache *child_regcache;
5308 CORE_ADDR parent_pc;
5309
5310 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5311 indicating that the displaced stepping of the syscall instruction
5312 has been done. Perform cleanup for the parent process here. Note
5313 that this operation also cleans up the child process for vfork,
5314 because their pages are shared. */
00431a78 5315 displaced_step_fixup (ecs->event_thread, GDB_SIGNAL_TRAP);
c2829269
PA
5316 /* Start a new step-over in another thread if there's one
5317 that needs it. */
5318 start_step_over ();
e2d96639 5319
e2d96639
YQ
5320 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5321 the child's PC is also within the scratchpad. Set the child's PC
5322 to the parent's PC value, which has already been fixed up.
5323 FIXME: we use the parent's aspace here, although we're touching
5324 the child, because the child hasn't been added to the inferior
5325 list yet at this point. */
5326
5327 child_regcache
5b6d1e4f
PA
5328 = get_thread_arch_aspace_regcache (parent_inf->process_target (),
5329 ecs->ws.value.related_pid,
e2d96639
YQ
5330 gdbarch,
5331 parent_inf->aspace);
5332 /* Read PC value of parent process. */
5333 parent_pc = regcache_read_pc (regcache);
5334
136821d9
SM
5335 displaced_debug_printf ("write child pc from %s to %s",
5336 paddress (gdbarch,
5337 regcache_read_pc (child_regcache)),
5338 paddress (gdbarch, parent_pc));
e2d96639
YQ
5339
5340 regcache_write_pc (child_regcache, parent_pc);
5341 }
5342 }
5343
00431a78 5344 context_switch (ecs);
5a2901d9 5345
b242c3c2
PA
5346 /* Immediately detach breakpoints from the child before there's
5347 any chance of letting the user delete breakpoints from the
5348 breakpoint lists. If we don't do this early, it's easy to
5349 leave leftover traps in the child, viz: "break foo; catch
5350 fork; c; <fork>; del; c; <child calls foo>". We only follow
5351 the fork on the last `continue', and by that time the
5352 breakpoint at "foo" is long gone from the breakpoint table.
5353 If we vforked, then we don't need to unpatch here, since both
5354 parent and child are sharing the same memory pages; we'll
5355 need to unpatch at follow/detach time instead to be certain
5356 that new breakpoints added between catchpoint hit time and
5357 vfork follow are detached. */
5358 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
5359 {
b242c3c2
PA
5360 /* This won't actually modify the breakpoint list, but will
5361 physically remove the breakpoints from the child. */
d80ee84f 5362 detach_breakpoints (ecs->ws.value.related_pid);
b242c3c2
PA
5363 }
5364
34b7e8a6 5365 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 5366
e58b0e63
PA
5367 /* In case the event is caught by a catchpoint, remember that
5368 the event is to be followed at the next resume of the thread,
5369 and not immediately. */
5370 ecs->event_thread->pending_follow = ecs->ws;
5371
f2ffa92b
PA
5372 ecs->event_thread->suspend.stop_pc
5373 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
675bf4cb 5374
16c381f0 5375 ecs->event_thread->control.stop_bpstat
a01bda52 5376 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5377 ecs->event_thread->suspend.stop_pc,
5378 ecs->event_thread, &ecs->ws);
675bf4cb 5379
c65d6b55
PA
5380 if (handle_stop_requested (ecs))
5381 return;
5382
ce12b012
PA
5383 /* If no catchpoint triggered for this, then keep going. Note
5384 that we're interested in knowing the bpstat actually causes a
5385 stop, not just if it may explain the signal. Software
5386 watchpoints, for example, always appear in the bpstat. */
5387 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5388 {
5ab2fbf1 5389 bool follow_child
3e43a32a 5390 = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 5391
a493e3e2 5392 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
e58b0e63 5393
5b6d1e4f
PA
5394 process_stratum_target *targ
5395 = ecs->event_thread->inf->process_target ();
5396
5ab2fbf1 5397 bool should_resume = follow_fork ();
e58b0e63 5398
5b6d1e4f
PA
5399 /* Note that one of these may be an invalid pointer,
5400 depending on detach_fork. */
00431a78 5401 thread_info *parent = ecs->event_thread;
5b6d1e4f
PA
5402 thread_info *child
5403 = find_thread_ptid (targ, ecs->ws.value.related_pid);
6c95b8df 5404
a2077e25
PA
5405 /* At this point, the parent is marked running, and the
5406 child is marked stopped. */
5407
5408 /* If not resuming the parent, mark it stopped. */
5409 if (follow_child && !detach_fork && !non_stop && !sched_multi)
00431a78 5410 parent->set_running (false);
a2077e25
PA
5411
5412 /* If resuming the child, mark it running. */
5413 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
00431a78 5414 child->set_running (true);
a2077e25 5415
6c95b8df 5416 /* In non-stop mode, also resume the other branch. */
fbea99ea
PA
5417 if (!detach_fork && (non_stop
5418 || (sched_multi && target_is_non_stop_p ())))
6c95b8df
PA
5419 {
5420 if (follow_child)
5421 switch_to_thread (parent);
5422 else
5423 switch_to_thread (child);
5424
5425 ecs->event_thread = inferior_thread ();
5426 ecs->ptid = inferior_ptid;
5427 keep_going (ecs);
5428 }
5429
5430 if (follow_child)
5431 switch_to_thread (child);
5432 else
5433 switch_to_thread (parent);
5434
e58b0e63
PA
5435 ecs->event_thread = inferior_thread ();
5436 ecs->ptid = inferior_ptid;
5437
5438 if (should_resume)
5439 keep_going (ecs);
5440 else
22bcd14b 5441 stop_waiting (ecs);
04e68871
DJ
5442 return;
5443 }
94c57d6a
PA
5444 process_event_stop_test (ecs);
5445 return;
488f131b 5446
6c95b8df
PA
5447 case TARGET_WAITKIND_VFORK_DONE:
5448 /* Done with the shared memory region. Re-insert breakpoints in
5449 the parent, and keep going. */
5450
00431a78 5451 context_switch (ecs);
6c95b8df
PA
5452
5453 current_inferior ()->waiting_for_vfork_done = 0;
56710373 5454 current_inferior ()->pspace->breakpoints_not_allowed = 0;
c65d6b55
PA
5455
5456 if (handle_stop_requested (ecs))
5457 return;
5458
6c95b8df
PA
5459 /* This also takes care of reinserting breakpoints in the
5460 previously locked inferior. */
5461 keep_going (ecs);
5462 return;
5463
488f131b 5464 case TARGET_WAITKIND_EXECD:
488f131b 5465
cbd2b4e3
PA
5466 /* Note we can't read registers yet (the stop_pc), because we
5467 don't yet know the inferior's post-exec architecture.
5468 'stop_pc' is explicitly read below instead. */
00431a78 5469 switch_to_thread_no_regs (ecs->event_thread);
5a2901d9 5470
6c95b8df
PA
5471 /* Do whatever is necessary to the parent branch of the vfork. */
5472 handle_vfork_child_exec_or_exit (1);
5473
795e548f 5474 /* This causes the eventpoints and symbol table to be reset.
dda83cd7
SM
5475 Must do this now, before trying to determine whether to
5476 stop. */
71b43ef8 5477 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
795e548f 5478
17d8546e
DB
5479 /* In follow_exec we may have deleted the original thread and
5480 created a new one. Make sure that the event thread is the
5481 execd thread for that case (this is a nop otherwise). */
5482 ecs->event_thread = inferior_thread ();
5483
f2ffa92b
PA
5484 ecs->event_thread->suspend.stop_pc
5485 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
ecdc3a72 5486
16c381f0 5487 ecs->event_thread->control.stop_bpstat
a01bda52 5488 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5489 ecs->event_thread->suspend.stop_pc,
5490 ecs->event_thread, &ecs->ws);
795e548f 5491
71b43ef8
PA
5492 /* Note that this may be referenced from inside
5493 bpstat_stop_status above, through inferior_has_execd. */
5494 xfree (ecs->ws.value.execd_pathname);
5495 ecs->ws.value.execd_pathname = NULL;
5496
c65d6b55
PA
5497 if (handle_stop_requested (ecs))
5498 return;
5499
04e68871 5500 /* If no catchpoint triggered for this, then keep going. */
ce12b012 5501 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5502 {
a493e3e2 5503 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
04e68871
DJ
5504 keep_going (ecs);
5505 return;
5506 }
94c57d6a
PA
5507 process_event_stop_test (ecs);
5508 return;
488f131b 5509
b4dc5ffa 5510 /* Be careful not to try to gather much state about a thread
dda83cd7 5511 that's in a syscall. It's frequently a losing proposition. */
488f131b 5512 case TARGET_WAITKIND_SYSCALL_ENTRY:
1777feb0 5513 /* Getting the current syscall number. */
94c57d6a
PA
5514 if (handle_syscall_event (ecs) == 0)
5515 process_event_stop_test (ecs);
5516 return;
c906108c 5517
488f131b 5518 /* Before examining the threads further, step this thread to
dda83cd7
SM
5519 get it entirely out of the syscall. (We get notice of the
5520 event when the thread is just on the verge of exiting a
5521 syscall. Stepping one instruction seems to get it back
5522 into user code.) */
488f131b 5523 case TARGET_WAITKIND_SYSCALL_RETURN:
94c57d6a
PA
5524 if (handle_syscall_event (ecs) == 0)
5525 process_event_stop_test (ecs);
5526 return;
c906108c 5527
488f131b 5528 case TARGET_WAITKIND_STOPPED:
4f5d7f63
PA
5529 handle_signal_stop (ecs);
5530 return;
c906108c 5531
b2175913
MS
5532 case TARGET_WAITKIND_NO_HISTORY:
5533 /* Reverse execution: target ran out of history info. */
eab402df 5534
d1988021 5535 /* Switch to the stopped thread. */
00431a78 5536 context_switch (ecs);
1eb8556f 5537 infrun_debug_printf ("stopped");
d1988021 5538
34b7e8a6 5539 delete_just_stopped_threads_single_step_breakpoints ();
f2ffa92b
PA
5540 ecs->event_thread->suspend.stop_pc
5541 = regcache_read_pc (get_thread_regcache (inferior_thread ()));
c65d6b55
PA
5542
5543 if (handle_stop_requested (ecs))
5544 return;
5545
76727919 5546 gdb::observers::no_history.notify ();
22bcd14b 5547 stop_waiting (ecs);
b2175913 5548 return;
488f131b 5549 }
4f5d7f63
PA
5550}
5551
372316f1
PA
5552/* Restart threads back to what they were trying to do back when we
5553 paused them for an in-line step-over. The EVENT_THREAD thread is
5554 ignored. */
4d9d9d04
PA
5555
5556static void
372316f1
PA
5557restart_threads (struct thread_info *event_thread)
5558{
372316f1
PA
5559 /* In case the instruction just stepped spawned a new thread. */
5560 update_thread_list ();
5561
08036331 5562 for (thread_info *tp : all_non_exited_threads ())
372316f1 5563 {
f3f8ece4
PA
5564 switch_to_thread_no_regs (tp);
5565
372316f1
PA
5566 if (tp == event_thread)
5567 {
1eb8556f
SM
5568 infrun_debug_printf ("restart threads: [%s] is event thread",
5569 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5570 continue;
5571 }
5572
5573 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5574 {
1eb8556f
SM
5575 infrun_debug_printf ("restart threads: [%s] not meant to be running",
5576 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5577 continue;
5578 }
5579
5580 if (tp->resumed)
5581 {
1eb8556f
SM
5582 infrun_debug_printf ("restart threads: [%s] resumed",
5583 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5584 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
5585 continue;
5586 }
5587
5588 if (thread_is_in_step_over_chain (tp))
5589 {
1eb8556f
SM
5590 infrun_debug_printf ("restart threads: [%s] needs step-over",
5591 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5592 gdb_assert (!tp->resumed);
5593 continue;
5594 }
5595
5596
5597 if (tp->suspend.waitstatus_pending_p)
5598 {
1eb8556f
SM
5599 infrun_debug_printf ("restart threads: [%s] has pending status",
5600 target_pid_to_str (tp->ptid).c_str ());
719546c4 5601 tp->resumed = true;
372316f1
PA
5602 continue;
5603 }
5604
c65d6b55
PA
5605 gdb_assert (!tp->stop_requested);
5606
372316f1
PA
5607 /* If some thread needs to start a step-over at this point, it
5608 should still be in the step-over queue, and thus skipped
5609 above. */
5610 if (thread_still_needs_step_over (tp))
5611 {
5612 internal_error (__FILE__, __LINE__,
5613 "thread [%s] needs a step-over, but not in "
5614 "step-over queue\n",
a068643d 5615 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5616 }
5617
5618 if (currently_stepping (tp))
5619 {
1eb8556f
SM
5620 infrun_debug_printf ("restart threads: [%s] was stepping",
5621 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5622 keep_going_stepped_thread (tp);
5623 }
5624 else
5625 {
5626 struct execution_control_state ecss;
5627 struct execution_control_state *ecs = &ecss;
5628
1eb8556f
SM
5629 infrun_debug_printf ("restart threads: [%s] continuing",
5630 target_pid_to_str (tp->ptid).c_str ());
372316f1 5631 reset_ecs (ecs, tp);
00431a78 5632 switch_to_thread (tp);
372316f1
PA
5633 keep_going_pass_signal (ecs);
5634 }
5635 }
5636}
5637
5638/* Callback for iterate_over_threads. Find a resumed thread that has
5639 a pending waitstatus. */
5640
5641static int
5642resumed_thread_with_pending_status (struct thread_info *tp,
5643 void *arg)
5644{
5645 return (tp->resumed
5646 && tp->suspend.waitstatus_pending_p);
5647}
5648
5649/* Called when we get an event that may finish an in-line or
5650 out-of-line (displaced stepping) step-over started previously.
5651 Return true if the event is processed and we should go back to the
5652 event loop; false if the caller should continue processing the
5653 event. */
5654
5655static int
4d9d9d04
PA
5656finish_step_over (struct execution_control_state *ecs)
5657{
00431a78 5658 displaced_step_fixup (ecs->event_thread,
4d9d9d04
PA
5659 ecs->event_thread->suspend.stop_signal);
5660
c4464ade 5661 bool had_step_over_info = step_over_info_valid_p ();
372316f1
PA
5662
5663 if (had_step_over_info)
4d9d9d04
PA
5664 {
5665 /* If we're stepping over a breakpoint with all threads locked,
5666 then only the thread that was stepped should be reporting
5667 back an event. */
5668 gdb_assert (ecs->event_thread->control.trap_expected);
5669
c65d6b55 5670 clear_step_over_info ();
4d9d9d04
PA
5671 }
5672
fbea99ea 5673 if (!target_is_non_stop_p ())
372316f1 5674 return 0;
4d9d9d04
PA
5675
5676 /* Start a new step-over in another thread if there's one that
5677 needs it. */
5678 start_step_over ();
372316f1
PA
5679
5680 /* If we were stepping over a breakpoint before, and haven't started
5681 a new in-line step-over sequence, then restart all other threads
5682 (except the event thread). We can't do this in all-stop, as then
5683 e.g., we wouldn't be able to issue any other remote packet until
5684 these other threads stop. */
5685 if (had_step_over_info && !step_over_info_valid_p ())
5686 {
5687 struct thread_info *pending;
5688
5689 /* If we only have threads with pending statuses, the restart
5690 below won't restart any thread and so nothing re-inserts the
5691 breakpoint we just stepped over. But we need it inserted
5692 when we later process the pending events, otherwise if
5693 another thread has a pending event for this breakpoint too,
5694 we'd discard its event (because the breakpoint that
5695 originally caused the event was no longer inserted). */
00431a78 5696 context_switch (ecs);
372316f1
PA
5697 insert_breakpoints ();
5698
5699 restart_threads (ecs->event_thread);
5700
5701 /* If we have events pending, go through handle_inferior_event
5702 again, picking up a pending event at random. This avoids
5703 thread starvation. */
5704
5705 /* But not if we just stepped over a watchpoint in order to let
5706 the instruction execute so we can evaluate its expression.
5707 The set of watchpoints that triggered is recorded in the
5708 breakpoint objects themselves (see bp->watchpoint_triggered).
5709 If we processed another event first, that other event could
5710 clobber this info. */
5711 if (ecs->event_thread->stepping_over_watchpoint)
5712 return 0;
5713
5714 pending = iterate_over_threads (resumed_thread_with_pending_status,
5715 NULL);
5716 if (pending != NULL)
5717 {
5718 struct thread_info *tp = ecs->event_thread;
5719 struct regcache *regcache;
5720
1eb8556f
SM
5721 infrun_debug_printf ("found resumed threads with "
5722 "pending events, saving status");
372316f1
PA
5723
5724 gdb_assert (pending != tp);
5725
5726 /* Record the event thread's event for later. */
5727 save_waitstatus (tp, &ecs->ws);
5728 /* This was cleared early, by handle_inferior_event. Set it
5729 so this pending event is considered by
5730 do_target_wait. */
719546c4 5731 tp->resumed = true;
372316f1
PA
5732
5733 gdb_assert (!tp->executing);
5734
00431a78 5735 regcache = get_thread_regcache (tp);
372316f1
PA
5736 tp->suspend.stop_pc = regcache_read_pc (regcache);
5737
1eb8556f
SM
5738 infrun_debug_printf ("saved stop_pc=%s for %s "
5739 "(currently_stepping=%d)",
5740 paddress (target_gdbarch (),
dda83cd7 5741 tp->suspend.stop_pc),
1eb8556f
SM
5742 target_pid_to_str (tp->ptid).c_str (),
5743 currently_stepping (tp));
372316f1
PA
5744
5745 /* This in-line step-over finished; clear this so we won't
5746 start a new one. This is what handle_signal_stop would
5747 do, if we returned false. */
5748 tp->stepping_over_breakpoint = 0;
5749
5750 /* Wake up the event loop again. */
5751 mark_async_event_handler (infrun_async_inferior_event_token);
5752
5753 prepare_to_wait (ecs);
5754 return 1;
5755 }
5756 }
5757
5758 return 0;
4d9d9d04
PA
5759}
5760
4f5d7f63
PA
5761/* Come here when the program has stopped with a signal. */
5762
5763static void
5764handle_signal_stop (struct execution_control_state *ecs)
5765{
5766 struct frame_info *frame;
5767 struct gdbarch *gdbarch;
5768 int stopped_by_watchpoint;
5769 enum stop_kind stop_soon;
5770 int random_signal;
c906108c 5771
f0407826
DE
5772 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
5773
c65d6b55
PA
5774 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
5775
f0407826
DE
5776 /* Do we need to clean up the state of a thread that has
5777 completed a displaced single-step? (Doing so usually affects
5778 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
5779 if (finish_step_over (ecs))
5780 return;
f0407826
DE
5781
5782 /* If we either finished a single-step or hit a breakpoint, but
5783 the user wanted this thread to be stopped, pretend we got a
5784 SIG0 (generic unsignaled stop). */
5785 if (ecs->event_thread->stop_requested
5786 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
5787 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
237fc4c9 5788
f2ffa92b
PA
5789 ecs->event_thread->suspend.stop_pc
5790 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
488f131b 5791
527159b7 5792 if (debug_infrun)
237fc4c9 5793 {
00431a78 5794 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
b926417a 5795 struct gdbarch *reg_gdbarch = regcache->arch ();
7f82dfc7 5796
f3f8ece4 5797 switch_to_thread (ecs->event_thread);
5af949e3 5798
1eb8556f
SM
5799 infrun_debug_printf ("stop_pc=%s",
5800 paddress (reg_gdbarch,
5801 ecs->event_thread->suspend.stop_pc));
d92524f1 5802 if (target_stopped_by_watchpoint ())
237fc4c9 5803 {
dda83cd7 5804 CORE_ADDR addr;
abbb1732 5805
1eb8556f 5806 infrun_debug_printf ("stopped by watchpoint");
237fc4c9 5807
8b88a78e 5808 if (target_stopped_data_address (current_top_target (), &addr))
1eb8556f 5809 infrun_debug_printf ("stopped data address=%s",
dda83cd7
SM
5810 paddress (reg_gdbarch, addr));
5811 else
1eb8556f 5812 infrun_debug_printf ("(no data address available)");
237fc4c9
PA
5813 }
5814 }
527159b7 5815
36fa8042
PA
5816 /* This originates from start_remote(), start_inferior() and
5817 shared library hook functions. */
00431a78 5818 stop_soon = get_inferior_stop_soon (ecs);
36fa8042
PA
5819 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
5820 {
00431a78 5821 context_switch (ecs);
1eb8556f 5822 infrun_debug_printf ("quietly stopped");
c4464ade 5823 stop_print_frame = true;
22bcd14b 5824 stop_waiting (ecs);
36fa8042
PA
5825 return;
5826 }
5827
36fa8042
PA
5828 /* This originates from attach_command(). We need to overwrite
5829 the stop_signal here, because some kernels don't ignore a
5830 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
5831 See more comments in inferior.h. On the other hand, if we
5832 get a non-SIGSTOP, report it to the user - assume the backend
5833 will handle the SIGSTOP if it should show up later.
5834
5835 Also consider that the attach is complete when we see a
5836 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
5837 target extended-remote report it instead of a SIGSTOP
5838 (e.g. gdbserver). We already rely on SIGTRAP being our
5839 signal, so this is no exception.
5840
5841 Also consider that the attach is complete when we see a
5842 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
5843 the target to stop all threads of the inferior, in case the
5844 low level attach operation doesn't stop them implicitly. If
5845 they weren't stopped implicitly, then the stub will report a
5846 GDB_SIGNAL_0, meaning: stopped for no particular reason
5847 other than GDB's request. */
5848 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5849 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
5850 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5851 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
5852 {
c4464ade 5853 stop_print_frame = true;
22bcd14b 5854 stop_waiting (ecs);
36fa8042
PA
5855 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5856 return;
5857 }
5858
488f131b 5859 /* See if something interesting happened to the non-current thread. If
b40c7d58 5860 so, then switch to that thread. */
d7e15655 5861 if (ecs->ptid != inferior_ptid)
488f131b 5862 {
1eb8556f 5863 infrun_debug_printf ("context switch");
527159b7 5864
00431a78 5865 context_switch (ecs);
c5aa993b 5866
9a4105ab 5867 if (deprecated_context_hook)
00431a78 5868 deprecated_context_hook (ecs->event_thread->global_num);
488f131b 5869 }
c906108c 5870
568d6575
UW
5871 /* At this point, get hold of the now-current thread's frame. */
5872 frame = get_current_frame ();
5873 gdbarch = get_frame_arch (frame);
5874
2adfaa28 5875 /* Pull the single step breakpoints out of the target. */
af48d08f 5876 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
488f131b 5877 {
af48d08f 5878 struct regcache *regcache;
af48d08f 5879 CORE_ADDR pc;
2adfaa28 5880
00431a78 5881 regcache = get_thread_regcache (ecs->event_thread);
8b86c959
YQ
5882 const address_space *aspace = regcache->aspace ();
5883
af48d08f 5884 pc = regcache_read_pc (regcache);
34b7e8a6 5885
af48d08f
PA
5886 /* However, before doing so, if this single-step breakpoint was
5887 actually for another thread, set this thread up for moving
5888 past it. */
5889 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
5890 aspace, pc))
5891 {
5892 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28 5893 {
1eb8556f
SM
5894 infrun_debug_printf ("[%s] hit another thread's single-step "
5895 "breakpoint",
5896 target_pid_to_str (ecs->ptid).c_str ());
af48d08f
PA
5897 ecs->hit_singlestep_breakpoint = 1;
5898 }
5899 }
5900 else
5901 {
1eb8556f
SM
5902 infrun_debug_printf ("[%s] hit its single-step breakpoint",
5903 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28 5904 }
488f131b 5905 }
af48d08f 5906 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 5907
963f9c80
PA
5908 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5909 && ecs->event_thread->control.trap_expected
5910 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
5911 stopped_by_watchpoint = 0;
5912 else
5913 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
5914
5915 /* If necessary, step over this watchpoint. We'll be back to display
5916 it in a moment. */
5917 if (stopped_by_watchpoint
9aed480c 5918 && (target_have_steppable_watchpoint ()
568d6575 5919 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 5920 {
488f131b 5921 /* At this point, we are stopped at an instruction which has
dda83cd7
SM
5922 attempted to write to a piece of memory under control of
5923 a watchpoint. The instruction hasn't actually executed
5924 yet. If we were to evaluate the watchpoint expression
5925 now, we would get the old value, and therefore no change
5926 would seem to have occurred.
5927
5928 In order to make watchpoints work `right', we really need
5929 to complete the memory write, and then evaluate the
5930 watchpoint expression. We do this by single-stepping the
d983da9c
DJ
5931 target.
5932
7f89fd65 5933 It may not be necessary to disable the watchpoint to step over
d983da9c
DJ
5934 it. For example, the PA can (with some kernel cooperation)
5935 single step over a watchpoint without disabling the watchpoint.
5936
5937 It is far more common to need to disable a watchpoint to step
5938 the inferior over it. If we have non-steppable watchpoints,
5939 we must disable the current watchpoint; it's simplest to
963f9c80
PA
5940 disable all watchpoints.
5941
5942 Any breakpoint at PC must also be stepped over -- if there's
5943 one, it will have already triggered before the watchpoint
5944 triggered, and we either already reported it to the user, or
5945 it didn't cause a stop and we called keep_going. In either
5946 case, if there was a breakpoint at PC, we must be trying to
5947 step past it. */
5948 ecs->event_thread->stepping_over_watchpoint = 1;
5949 keep_going (ecs);
488f131b
JB
5950 return;
5951 }
5952
4e1c45ea 5953 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 5954 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
5955 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
5956 ecs->event_thread->control.stop_step = 0;
c4464ade 5957 stop_print_frame = true;
488f131b 5958 stopped_by_random_signal = 0;
ddfe970e 5959 bpstat stop_chain = NULL;
488f131b 5960
edb3359d
DJ
5961 /* Hide inlined functions starting here, unless we just performed stepi or
5962 nexti. After stepi and nexti, always show the innermost frame (not any
5963 inline function call sites). */
16c381f0 5964 if (ecs->event_thread->control.step_range_end != 1)
0574c78f 5965 {
00431a78
PA
5966 const address_space *aspace
5967 = get_thread_regcache (ecs->event_thread)->aspace ();
0574c78f
GB
5968
5969 /* skip_inline_frames is expensive, so we avoid it if we can
5970 determine that the address is one where functions cannot have
5971 been inlined. This improves performance with inferiors that
5972 load a lot of shared libraries, because the solib event
5973 breakpoint is defined as the address of a function (i.e. not
5974 inline). Note that we have to check the previous PC as well
5975 as the current one to catch cases when we have just
5976 single-stepped off a breakpoint prior to reinstating it.
5977 Note that we're assuming that the code we single-step to is
5978 not inline, but that's not definitive: there's nothing
5979 preventing the event breakpoint function from containing
5980 inlined code, and the single-step ending up there. If the
5981 user had set a breakpoint on that inlined code, the missing
5982 skip_inline_frames call would break things. Fortunately
5983 that's an extremely unlikely scenario. */
f2ffa92b
PA
5984 if (!pc_at_non_inline_function (aspace,
5985 ecs->event_thread->suspend.stop_pc,
5986 &ecs->ws)
a210c238
MR
5987 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5988 && ecs->event_thread->control.trap_expected
5989 && pc_at_non_inline_function (aspace,
5990 ecs->event_thread->prev_pc,
09ac7c10 5991 &ecs->ws)))
1c5a993e 5992 {
f2ffa92b
PA
5993 stop_chain = build_bpstat_chain (aspace,
5994 ecs->event_thread->suspend.stop_pc,
5995 &ecs->ws);
00431a78 5996 skip_inline_frames (ecs->event_thread, stop_chain);
1c5a993e
MR
5997
5998 /* Re-fetch current thread's frame in case that invalidated
5999 the frame cache. */
6000 frame = get_current_frame ();
6001 gdbarch = get_frame_arch (frame);
6002 }
0574c78f 6003 }
edb3359d 6004
a493e3e2 6005 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
16c381f0 6006 && ecs->event_thread->control.trap_expected
568d6575 6007 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 6008 && currently_stepping (ecs->event_thread))
3352ef37 6009 {
b50d7442 6010 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 6011 also on an instruction that needs to be stepped multiple
1777feb0 6012 times before it's been fully executed. E.g., architectures
3352ef37
AC
6013 with a delay slot. It needs to be stepped twice, once for
6014 the instruction and once for the delay slot. */
6015 int step_through_delay
568d6575 6016 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 6017
1eb8556f
SM
6018 if (step_through_delay)
6019 infrun_debug_printf ("step through delay");
6020
16c381f0
JK
6021 if (ecs->event_thread->control.step_range_end == 0
6022 && step_through_delay)
3352ef37
AC
6023 {
6024 /* The user issued a continue when stopped at a breakpoint.
6025 Set up for another trap and get out of here. */
dda83cd7
SM
6026 ecs->event_thread->stepping_over_breakpoint = 1;
6027 keep_going (ecs);
6028 return;
3352ef37
AC
6029 }
6030 else if (step_through_delay)
6031 {
6032 /* The user issued a step when stopped at a breakpoint.
6033 Maybe we should stop, maybe we should not - the delay
6034 slot *might* correspond to a line of source. In any
ca67fcb8
VP
6035 case, don't decide that here, just set
6036 ecs->stepping_over_breakpoint, making sure we
6037 single-step again before breakpoints are re-inserted. */
4e1c45ea 6038 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
6039 }
6040 }
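 /* As a concrete illustration of the delay-slot case handled above
    (illustrative only, not taken from any particular port): on a
    delay-slot architecture such as MIPS, a sequence like

	jal   foo          # branch whose effect is delayed
	addiu a0, a0, 1    # delay slot, executes before the jump lands

    must be single-stepped twice -- once for the branch and once for
    the delay slot -- before the thread has really moved past the
    breakpointed instruction, which is why
    gdbarch_single_step_through_delay is consulted here.  */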
6041
ab04a2af
TT
6042 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6043 handles this event. */
6044 ecs->event_thread->control.stop_bpstat
a01bda52 6045 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
6046 ecs->event_thread->suspend.stop_pc,
6047 ecs->event_thread, &ecs->ws, stop_chain);
db82e815 6048
ab04a2af
TT
6049 /* Set this again, in case evaluating a breakpoint condition
6050 called a function. */
c4464ade 6051 stop_print_frame = true;
73dd234f 6052
ab04a2af
TT
6053 /* This is where we handle "moribund" watchpoints. Unlike
6054 software breakpoints traps, hardware watchpoint traps are
6055 always distinguishable from random traps. If no high-level
6056 watchpoint is associated with the reported stop data address
6057 anymore, then the bpstat does not explain the signal ---
6058 simply make sure to ignore it if `stopped_by_watchpoint' is
6059 set. */
6060
1eb8556f 6061 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
47591c29 6062 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 6063 GDB_SIGNAL_TRAP)
ab04a2af 6064 && stopped_by_watchpoint)
1eb8556f
SM
6065 {
6066 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
6067 "ignoring");
6068 }
73dd234f 6069
bac7d97b 6070 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
6071 at one stage in the past included checks for an inferior
6072 function call's call dummy's return breakpoint. The original
6073 comment, that went with the test, read:
03cebad2 6074
ab04a2af
TT
6075 ``End of a stack dummy. Some systems (e.g. Sony news) give
6076 another signal besides SIGTRAP, so check here as well as
6077 above.''
73dd234f 6078
ab04a2af
TT
6079 If someone ever tries to get call dummies on a
6080 non-executable stack to work (where the target would stop
6081 with something like a SIGSEGV), then those tests might need
6082 to be re-instated. Given, however, that the tests were only
6083 enabled when momentary breakpoints were not being used, I
6084 suspect that it won't be the case.
488f131b 6085
ab04a2af
TT
6086 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6087 be necessary for call dummies on a non-executable stack on
6088 SPARC. */
488f131b 6089
bac7d97b 6090 /* See if the breakpoints module can explain the signal. */
47591c29
PA
6091 random_signal
6092 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6093 ecs->event_thread->suspend.stop_signal);
bac7d97b 6094
1cf4d951
PA
6095 /* Maybe this was a trap for a software breakpoint that has since
6096 been removed. */
6097 if (random_signal && target_stopped_by_sw_breakpoint ())
6098 {
5133a315
LM
6099 if (gdbarch_program_breakpoint_here_p (gdbarch,
6100 ecs->event_thread->suspend.stop_pc))
1cf4d951
PA
6101 {
6102 struct regcache *regcache;
6103 int decr_pc;
6104
6105 /* Re-adjust PC to what the program would see if GDB was not
6106 debugging it. */
00431a78 6107 regcache = get_thread_regcache (ecs->event_thread);
527a273a 6108 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
6109 if (decr_pc != 0)
6110 {
07036511
TT
6111 gdb::optional<scoped_restore_tmpl<int>>
6112 restore_operation_disable;
1cf4d951
PA
6113
6114 if (record_full_is_used ())
07036511
TT
6115 restore_operation_disable.emplace
6116 (record_full_gdb_operation_disable_set ());
1cf4d951 6117
f2ffa92b
PA
6118 regcache_write_pc (regcache,
6119 ecs->event_thread->suspend.stop_pc + decr_pc);
1cf4d951
PA
6120 }
6121 }
6122 else
6123 {
6124 /* A delayed software breakpoint event. Ignore the trap. */
1eb8556f 6125 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
1cf4d951
PA
6126 random_signal = 0;
6127 }
6128 }
6129
6130 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6131 has since been removed. */
6132 if (random_signal && target_stopped_by_hw_breakpoint ())
6133 {
6134 /* A delayed hardware breakpoint event. Ignore the trap. */
1eb8556f
SM
6135 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
6136 "trap, ignoring");
1cf4d951
PA
6137 random_signal = 0;
6138 }
6139
bac7d97b
PA
6140 /* If not, perhaps stepping/nexting can. */
6141 if (random_signal)
6142 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6143 && currently_stepping (ecs->event_thread));
ab04a2af 6144
2adfaa28
PA
6145 /* Perhaps the thread hit a single-step breakpoint of _another_
6146 thread. Single-step breakpoints are transparent to the
6147 breakpoints module. */
6148 if (random_signal)
6149 random_signal = !ecs->hit_singlestep_breakpoint;
6150
bac7d97b
PA
6151 /* No? Perhaps we got a moribund watchpoint. */
6152 if (random_signal)
6153 random_signal = !stopped_by_watchpoint;
ab04a2af 6154
c65d6b55
PA
6155 /* Always stop if the user explicitly requested this thread to
6156 remain stopped. */
6157 if (ecs->event_thread->stop_requested)
6158 {
6159 random_signal = 1;
1eb8556f 6160 infrun_debug_printf ("user-requested stop");
c65d6b55
PA
6161 }
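 /* Summing up the cascade above: RANDOM_SIGNAL ends up nonzero only
    if nothing accounts for the stop -- the bpstat does not explain
    the signal, it is not a delayed event for a software or hardware
    breakpoint that has since been removed, it is not the completion
    of a single-step by this thread, it is not another thread's
    single-step breakpoint, and no moribund watchpoint is involved --
    or if the user explicitly requested that this thread stop.
    Random signals are handled right below according to the signal
    tables; everything else falls through to
    process_event_stop_test.  */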
6162
488f131b
JB
6163 /* For the program's own signals, act according to
6164 the signal handling tables. */
6165
ce12b012 6166 if (random_signal)
488f131b
JB
6167 {
6168 /* Signal not for debugging purposes. */
5b6d1e4f 6169 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
c9737c08 6170 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
488f131b 6171
1eb8556f
SM
6172 infrun_debug_printf ("random signal (%s)",
6173 gdb_signal_to_symbol_string (stop_signal));
527159b7 6174
488f131b
JB
6175 stopped_by_random_signal = 1;
6176
252fbfc8
PA
6177 /* Always stop on signals if we're either just gaining control
6178 of the program, or the user explicitly requested this thread
6179 to remain stopped. */
d6b48e9c 6180 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 6181 || ecs->event_thread->stop_requested
24291992 6182 || (!inf->detaching
16c381f0 6183 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
488f131b 6184 {
22bcd14b 6185 stop_waiting (ecs);
488f131b
JB
6186 return;
6187 }
b57bacec
PA
6188
6189 /* Notify observers the signal has "handle print" set. Note we
6190 returned early above if stopping; normal_stop handles the
6191 printing in that case. */
6192 if (signal_print[ecs->event_thread->suspend.stop_signal])
6193 {
6194 /* The signal table tells us to print about this signal. */
223ffa71 6195 target_terminal::ours_for_output ();
76727919 6196 gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
223ffa71 6197 target_terminal::inferior ();
b57bacec 6198 }
488f131b
JB
6199
6200 /* Clear the signal if it should not be passed. */
16c381f0 6201 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
a493e3e2 6202 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
488f131b 6203
f2ffa92b 6204 if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
16c381f0 6205 && ecs->event_thread->control.trap_expected
8358c15c 6206 && ecs->event_thread->control.step_resume_breakpoint == NULL)
68f53502
AC
6207 {
6208 /* We were just starting a new sequence, attempting to
6209 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 6210 Instead this signal arrives. This signal will take us out
68f53502
AC
6211 of the stepping range so GDB needs to remember to, when
6212 the signal handler returns, resume stepping off that
6213 breakpoint. */
6214 /* To simplify things, "continue" is forced to use the same
6215 code paths as single-step - set a breakpoint at the
6216 signal return address and then, once hit, step off that
6217 breakpoint. */
1eb8556f 6218 infrun_debug_printf ("signal arrived while stepping over breakpoint");
d3169d93 6219
2c03e5be 6220 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 6221 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6222 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6223 ecs->event_thread->control.trap_expected = 0;
d137e6dc
PA
6224
6225 /* If we were nexting/stepping some other thread, switch to
6226 it, so that we don't continue it, losing control. */
6227 if (!switch_back_to_stepped_thread (ecs))
6228 keep_going (ecs);
9d799f85 6229 return;
68f53502 6230 }
9d799f85 6231
e5f8a7cc 6232 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
f2ffa92b
PA
6233 && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6234 ecs->event_thread)
e5f8a7cc 6235 || ecs->event_thread->control.step_range_end == 1)
edb3359d 6236 && frame_id_eq (get_stack_frame_id (frame),
16c381f0 6237 ecs->event_thread->control.step_stack_frame_id)
8358c15c 6238 && ecs->event_thread->control.step_resume_breakpoint == NULL)
d303a6c7
AC
6239 {
6240 /* The inferior is about to take a signal that will take it
6241 out of the single step range. Set a breakpoint at the
6242 current PC (which is presumably where the signal handler
6243 will eventually return) and then allow the inferior to
6244 run free.
6245
6246 Note that this is only needed for a signal delivered
6247 while in the single-step range. Nested signals aren't a
6248 problem as they eventually all return. */
1eb8556f 6249 infrun_debug_printf ("signal may take us out of single-step range");
237fc4c9 6250
372316f1 6251 clear_step_over_info ();
2c03e5be 6252 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 6253 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6254 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6255 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
6256 keep_going (ecs);
6257 return;
d303a6c7 6258 }
9d799f85 6259
85102364 6260 /* Note: step_resume_breakpoint may be non-NULL. This occurs
9d799f85
AC
6261 when either there's a nested signal, or when there's a
6262 pending signal enabled just as the signal handler returns
6263 (leaving the inferior at the step-resume-breakpoint without
6264 actually executing it). Either way continue until the
6265 breakpoint is really hit. */
c447ac0b
PA
6266
6267 if (!switch_back_to_stepped_thread (ecs))
6268 {
1eb8556f 6269 infrun_debug_printf ("random signal, keep going");
c447ac0b
PA
6270
6271 keep_going (ecs);
6272 }
6273 return;
488f131b 6274 }
94c57d6a
PA
6275
6276 process_event_stop_test (ecs);
6277}
6278
6279/* Come here when we've got some debug event / signal we can explain
6280 (IOW, not a random signal), and test whether it should cause a
6281 stop, or whether we should resume the inferior (transparently).
6282 E.g., could be a breakpoint whose condition evaluates false; we
6283 could be still stepping within the line; etc. */
6284
6285static void
6286process_event_stop_test (struct execution_control_state *ecs)
6287{
6288 struct symtab_and_line stop_pc_sal;
6289 struct frame_info *frame;
6290 struct gdbarch *gdbarch;
cdaa5b73
PA
6291 CORE_ADDR jmp_buf_pc;
6292 struct bpstat_what what;
94c57d6a 6293
cdaa5b73 6294 /* Handle cases caused by hitting a breakpoint. */
611c83ae 6295
cdaa5b73
PA
6296 frame = get_current_frame ();
6297 gdbarch = get_frame_arch (frame);
fcf3daef 6298
cdaa5b73 6299 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 6300
cdaa5b73
PA
6301 if (what.call_dummy)
6302 {
6303 stop_stack_dummy = what.call_dummy;
6304 }
186c406b 6305
243a9253
PA
6306 /* A few breakpoint types have callbacks associated (e.g.,
6307 bp_jit_event). Run them now. */
6308 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6309
cdaa5b73
PA
6310 /* If we hit an internal event that triggers symbol changes, the
6311 current frame will be invalidated within bpstat_what (e.g., if we
6312 hit an internal solib event). Re-fetch it. */
6313 frame = get_current_frame ();
6314 gdbarch = get_frame_arch (frame);
e2e4d78b 6315
cdaa5b73
PA
6316 switch (what.main_action)
6317 {
6318 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6319 /* If we hit the breakpoint at longjmp while stepping, we
6320 install a momentary breakpoint at the target of the
6321 jmp_buf. */
186c406b 6322
1eb8556f 6323 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
186c406b 6324
cdaa5b73 6325 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 6326
cdaa5b73
PA
6327 if (what.is_longjmp)
6328 {
6329 struct value *arg_value;
6330
6331 /* If we set the longjmp breakpoint via a SystemTap probe,
6332 then use it to extract the arguments. The destination PC
6333 is the third argument to the probe. */
6334 arg_value = probe_safe_evaluate_at_pc (frame, 2);
6335 if (arg_value)
8fa0c4f8
AA
6336 {
6337 jmp_buf_pc = value_as_address (arg_value);
6338 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6339 }
cdaa5b73
PA
6340 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6341 || !gdbarch_get_longjmp_target (gdbarch,
6342 frame, &jmp_buf_pc))
e2e4d78b 6343 {
1eb8556f
SM
6344 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
6345 "(!gdbarch_get_longjmp_target)");
cdaa5b73
PA
6346 keep_going (ecs);
6347 return;
e2e4d78b 6348 }
e2e4d78b 6349
cdaa5b73
PA
6350 /* Insert a breakpoint at resume address. */
6351 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6352 }
6353 else
6354 check_exception_resume (ecs, frame);
6355 keep_going (ecs);
6356 return;
e81a37f7 6357
cdaa5b73
PA
6358 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6359 {
6360 struct frame_info *init_frame;
e81a37f7 6361
cdaa5b73 6362 /* There are several cases to consider.
c906108c 6363
cdaa5b73
PA
6364 1. The initiating frame no longer exists. In this case we
6365 must stop, because the exception or longjmp has gone too
6366 far.
2c03e5be 6367
cdaa5b73
PA
6368 2. The initiating frame exists, and is the same as the
6369 current frame. We stop, because the exception or longjmp
6370 has been caught.
2c03e5be 6371
cdaa5b73
PA
6372 3. The initiating frame exists and is different from the
6373 current frame. This means the exception or longjmp has
6374 been caught beneath the initiating frame, so keep going.
c906108c 6375
cdaa5b73
PA
6376 4. longjmp breakpoint has been placed just to protect
6377 against stale dummy frames and user is not interested in
6378 stopping around longjmps. */
c5aa993b 6379
1eb8556f 6380 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
c5aa993b 6381
cdaa5b73
PA
6382 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6383 != NULL);
6384 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 6385
cdaa5b73
PA
6386 if (what.is_longjmp)
6387 {
b67a2c6f 6388 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 6389
cdaa5b73 6390 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 6391 {
cdaa5b73
PA
6392 /* Case 4. */
6393 keep_going (ecs);
6394 return;
e5ef252a 6395 }
cdaa5b73 6396 }
c5aa993b 6397
cdaa5b73 6398 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 6399
cdaa5b73
PA
6400 if (init_frame)
6401 {
6402 struct frame_id current_id
6403 = get_frame_id (get_current_frame ());
6404 if (frame_id_eq (current_id,
6405 ecs->event_thread->initiating_frame))
6406 {
6407 /* Case 2. Fall through. */
6408 }
6409 else
6410 {
6411 /* Case 3. */
6412 keep_going (ecs);
6413 return;
6414 }
68f53502 6415 }
488f131b 6416
cdaa5b73
PA
6417 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6418 exists. */
6419 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 6420
bdc36728 6421 end_stepping_range (ecs);
cdaa5b73
PA
6422 }
6423 return;
e5ef252a 6424
cdaa5b73 6425 case BPSTAT_WHAT_SINGLE:
1eb8556f 6426 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
cdaa5b73
PA
6427 ecs->event_thread->stepping_over_breakpoint = 1;
6428 /* Still need to check other stuff, at least the case where we
6429 are stepping and step out of the right range. */
6430 break;
e5ef252a 6431
cdaa5b73 6432 case BPSTAT_WHAT_STEP_RESUME:
1eb8556f 6433 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
e5ef252a 6434
cdaa5b73
PA
6435 delete_step_resume_breakpoint (ecs->event_thread);
6436 if (ecs->event_thread->control.proceed_to_finish
6437 && execution_direction == EXEC_REVERSE)
6438 {
6439 struct thread_info *tp = ecs->event_thread;
6440
6441 /* We are finishing a function in reverse, and just hit the
6442 step-resume breakpoint at the start address of the
6443 function, and we're almost there -- just need to back up
6444 by one more single-step, which should take us back to the
6445 function call. */
6446 tp->control.step_range_start = tp->control.step_range_end = 1;
6447 keep_going (ecs);
e5ef252a 6448 return;
cdaa5b73
PA
6449 }
6450 fill_in_stop_func (gdbarch, ecs);
f2ffa92b 6451 if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
cdaa5b73
PA
6452 && execution_direction == EXEC_REVERSE)
6453 {
6454 /* We are stepping over a function call in reverse, and just
6455 hit the step-resume breakpoint at the start address of
6456 the function. Go back to single-stepping, which should
6457 take us back to the function call. */
6458 ecs->event_thread->stepping_over_breakpoint = 1;
6459 keep_going (ecs);
6460 return;
6461 }
6462 break;
e5ef252a 6463
cdaa5b73 6464 case BPSTAT_WHAT_STOP_NOISY:
1eb8556f 6465 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
c4464ade 6466 stop_print_frame = true;
e5ef252a 6467
33bf4c5c 6468 /* Assume the thread stopped for a breakpoint. We'll still check
99619bea
PA
6469 whether a/the breakpoint is there when the thread is next
6470 resumed. */
6471 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 6472
22bcd14b 6473 stop_waiting (ecs);
cdaa5b73 6474 return;
e5ef252a 6475
cdaa5b73 6476 case BPSTAT_WHAT_STOP_SILENT:
1eb8556f 6477 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
c4464ade 6478 stop_print_frame = false;
e5ef252a 6479
33bf4c5c 6480 /* Assume the thread stopped for a breakpoint. We'll still check
99619bea
PA
6481 whether a/the breakpoint is there when the thread is next
6482 resumed. */
6483 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 6484 stop_waiting (ecs);
cdaa5b73
PA
6485 return;
6486
6487 case BPSTAT_WHAT_HP_STEP_RESUME:
1eb8556f 6488 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
cdaa5b73
PA
6489
6490 delete_step_resume_breakpoint (ecs->event_thread);
6491 if (ecs->event_thread->step_after_step_resume_breakpoint)
6492 {
6493 /* Back when the step-resume breakpoint was inserted, we
6494 were trying to single-step off a breakpoint. Go back to
6495 doing that. */
6496 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6497 ecs->event_thread->stepping_over_breakpoint = 1;
6498 keep_going (ecs);
6499 return;
e5ef252a 6500 }
cdaa5b73
PA
6501 break;
6502
6503 case BPSTAT_WHAT_KEEP_CHECKING:
6504 break;
e5ef252a 6505 }
c906108c 6506
af48d08f
PA
6507 /* If we stepped a permanent breakpoint and we had a high priority
6508 step-resume breakpoint for the address we stepped, but we didn't
6509 hit it, then we must have stepped into the signal handler. The
6510 step-resume was only necessary to catch the case of _not_
6511 stepping into the handler, so delete it, and fall through to
6512 checking whether the step finished. */
6513 if (ecs->event_thread->stepped_breakpoint)
6514 {
6515 struct breakpoint *sr_bp
6516 = ecs->event_thread->control.step_resume_breakpoint;
6517
8d707a12
PA
6518 if (sr_bp != NULL
6519 && sr_bp->loc->permanent
af48d08f
PA
6520 && sr_bp->type == bp_hp_step_resume
6521 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6522 {
1eb8556f 6523 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
af48d08f
PA
6524 delete_step_resume_breakpoint (ecs->event_thread);
6525 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6526 }
6527 }
6528
cdaa5b73
PA
6529 /* We come here if we hit a breakpoint but should not stop for it.
6530 Possibly we also were stepping and should stop for that. So fall
6531 through and test for stepping. But, if not stepping, do not
6532 stop. */
c906108c 6533
a7212384
UW
6534 /* In all-stop mode, if we're currently stepping but have stopped in
6535 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
6536 if (switch_back_to_stepped_thread (ecs))
6537 return;
776f04fa 6538
8358c15c 6539 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 6540 {
1eb8556f 6541 infrun_debug_printf ("step-resume breakpoint is inserted");
527159b7 6542
488f131b 6543 /* Having a step-resume breakpoint overrides anything
dda83cd7
SM
6544 else having to do with stepping commands until
6545 that breakpoint is reached. */
488f131b
JB
6546 keep_going (ecs);
6547 return;
6548 }
c5aa993b 6549
16c381f0 6550 if (ecs->event_thread->control.step_range_end == 0)
488f131b 6551 {
1eb8556f 6552 infrun_debug_printf ("no stepping, continue");
488f131b 6553 /* Likewise if we aren't even stepping. */
488f131b
JB
6554 keep_going (ecs);
6555 return;
6556 }
c5aa993b 6557
4b7703ad
JB
6558 /* Re-fetch current thread's frame in case the code above caused
6559 the frame cache to be re-initialized, making our FRAME variable
6560 a dangling pointer. */
6561 frame = get_current_frame ();
628fe4e4 6562 gdbarch = get_frame_arch (frame);
7e324e48 6563 fill_in_stop_func (gdbarch, ecs);
4b7703ad 6564
488f131b 6565 /* If stepping through a line, keep going if still within it.
c906108c 6566
488f131b
JB
6567 Note that step_range_end is the address of the first instruction
6568 beyond the step range, and NOT the address of the last instruction
31410e84
MS
6569 within it!
6570
6571 Note also that during reverse execution, we may be stepping
6572 through a function epilogue and therefore must detect when
6573 the current-frame changes in the middle of a line. */
6574
f2ffa92b
PA
6575 if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6576 ecs->event_thread)
31410e84 6577 && (execution_direction != EXEC_REVERSE
388a8562 6578 || frame_id_eq (get_frame_id (frame),
16c381f0 6579 ecs->event_thread->control.step_frame_id)))
488f131b 6580 {
1eb8556f
SM
6581 infrun_debug_printf
6582 ("stepping inside range [%s-%s]",
6583 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6584 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 6585
c1e36e3e
PA
6586 /* Tentatively re-enable range stepping; `resume' disables it if
6587 necessary (e.g., if we're stepping over a breakpoint or we
6588 have software watchpoints). */
6589 ecs->event_thread->control.may_range_step = 1;
6590
b2175913
MS
6591 /* When stepping backward, stop at beginning of line range
6592 (unless it's the function entry point, in which case
6593 keep going back to the call point). */
f2ffa92b 6594 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
16c381f0 6595 if (stop_pc == ecs->event_thread->control.step_range_start
b2175913
MS
6596 && stop_pc != ecs->stop_func_start
6597 && execution_direction == EXEC_REVERSE)
bdc36728 6598 end_stepping_range (ecs);
b2175913
MS
6599 else
6600 keep_going (ecs);
6601
488f131b
JB
6602 return;
6603 }
c5aa993b 6604
488f131b 6605 /* We stepped out of the stepping range. */
c906108c 6606
488f131b 6607 /* If we are stepping at the source level and entered the runtime
388a8562
MS
6608 loader dynamic symbol resolution code...
6609
6610 EXEC_FORWARD: we keep on single stepping until we exit the run
6611 time loader code and reach the callee's address.
6612
6613 EXEC_REVERSE: we've already executed the callee (backward), and
6614 the runtime loader code is handled just like any other
6615 undebuggable function call. Now we need only keep stepping
6616 backward through the trampoline code, and that's handled further
6617 down, so there is nothing for us to do here. */
6618
6619 if (execution_direction != EXEC_REVERSE
16c381f0 6620 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
f2ffa92b 6621 && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
488f131b 6622 {
4c8c40e6 6623 CORE_ADDR pc_after_resolver =
f2ffa92b
PA
6624 gdbarch_skip_solib_resolver (gdbarch,
6625 ecs->event_thread->suspend.stop_pc);
c906108c 6626
1eb8556f 6627 infrun_debug_printf ("stepped into dynsym resolve code");
527159b7 6628
488f131b
JB
6629 if (pc_after_resolver)
6630 {
6631 /* Set up a step-resume breakpoint at the address
6632 indicated by SKIP_SOLIB_RESOLVER. */
51abb421 6633 symtab_and_line sr_sal;
488f131b 6634 sr_sal.pc = pc_after_resolver;
6c95b8df 6635 sr_sal.pspace = get_frame_program_space (frame);
488f131b 6636
a6d9a66e
UW
6637 insert_step_resume_breakpoint_at_sal (gdbarch,
6638 sr_sal, null_frame_id);
c5aa993b 6639 }
c906108c 6640
488f131b
JB
6641 keep_going (ecs);
6642 return;
6643 }
c906108c 6644
1d509aa6
MM
6645 /* Step through an indirect branch thunk. */
6646 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
f2ffa92b
PA
6647 && gdbarch_in_indirect_branch_thunk (gdbarch,
6648 ecs->event_thread->suspend.stop_pc))
1d509aa6 6649 {
1eb8556f 6650 infrun_debug_printf ("stepped into indirect branch thunk");
1d509aa6
MM
6651 keep_going (ecs);
6652 return;
6653 }
6654
16c381f0
JK
6655 if (ecs->event_thread->control.step_range_end != 1
6656 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6657 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 6658 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 6659 {
1eb8556f 6660 infrun_debug_printf ("stepped into signal trampoline");
42edda50 6661 /* The inferior, while doing a "step" or "next", has ended up in
dda83cd7
SM
6662 a signal trampoline (either by a signal being delivered or by
6663 the signal handler returning). Just single-step until the
6664 inferior leaves the trampoline (either by calling the handler
6665 or returning). */
488f131b
JB
6666 keep_going (ecs);
6667 return;
6668 }
c906108c 6669
14132e89
MR
6670 /* If we're in the return path from a shared library trampoline,
6671 we want to proceed through the trampoline when stepping. */
6672 /* macro/2012-04-25: This needs to come before the subroutine
6673 call check below as on some targets return trampolines look
6674 like subroutine calls (MIPS16 return thunks). */
6675 if (gdbarch_in_solib_return_trampoline (gdbarch,
f2ffa92b
PA
6676 ecs->event_thread->suspend.stop_pc,
6677 ecs->stop_func_name)
14132e89
MR
6678 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6679 {
6680 /* Determine where this trampoline returns. */
f2ffa92b
PA
6681 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6682 CORE_ADDR real_stop_pc
6683 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
14132e89 6684
1eb8556f 6685 infrun_debug_printf ("stepped into solib return tramp");
14132e89
MR
6686
6687 /* Only proceed through if we know where it's going. */
6688 if (real_stop_pc)
6689 {
6690 /* And put the step-breakpoint there and go until there. */
51abb421 6691 symtab_and_line sr_sal;
14132e89
MR
6692 sr_sal.pc = real_stop_pc;
6693 sr_sal.section = find_pc_overlay (sr_sal.pc);
6694 sr_sal.pspace = get_frame_program_space (frame);
6695
6696 /* Do not specify what the fp should be when we stop since
6697 on some machines the prologue is where the new fp value
6698 is established. */
6699 insert_step_resume_breakpoint_at_sal (gdbarch,
6700 sr_sal, null_frame_id);
6701
6702 /* Restart without fiddling with the step ranges or
6703 other state. */
6704 keep_going (ecs);
6705 return;
6706 }
6707 }
6708
c17eaafe
DJ
6709 /* Check for subroutine calls. The check for the current frame
6710 equalling the step ID is not necessary - the check of the
6711 previous frame's ID is sufficient - but it is a common case and
6712 cheaper than checking the previous frame's ID.
14e60db5
DJ
6713
6714 NOTE: frame_id_eq will never report two invalid frame IDs as
6715 being equal, so to get into this block, both the current and
6716 previous frame must have valid frame IDs. */
005ca36a
JB
6717 /* The outer_frame_id check is a heuristic to detect stepping
6718 through startup code. If we step over an instruction which
6719 sets the stack pointer from an invalid value to a valid value,
6720 we may detect that as a subroutine call from the mythical
6721 "outermost" function. This could be fixed by marking
6722 outermost frames as !stack_p,code_p,special_p. Then the
6723 initial outermost frame, before sp was valid, would
ce6cca6d 6724 have code_addr == &_start. See the comment in frame_id_eq
005ca36a 6725 for more. */
edb3359d 6726 if (!frame_id_eq (get_stack_frame_id (frame),
16c381f0 6727 ecs->event_thread->control.step_stack_frame_id)
005ca36a 6728 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
16c381f0
JK
6729 ecs->event_thread->control.step_stack_frame_id)
6730 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
005ca36a 6731 outer_frame_id)
885eeb5b 6732 || (ecs->event_thread->control.step_start_function
f2ffa92b 6733 != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
488f131b 6734 {
f2ffa92b 6735 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
95918acb 6736 CORE_ADDR real_stop_pc;
8fb3e588 6737
1eb8556f 6738 infrun_debug_printf ("stepped into subroutine");
527159b7 6739
b7a084be 6740 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
6741 {
6742 /* I presume that step_over_calls is only 0 when we're
6743 supposed to be stepping at the assembly language level
6744 ("stepi"). Just stop. */
388a8562 6745 /* And this works the same backward as frontward. MVS */
bdc36728 6746 end_stepping_range (ecs);
95918acb
AC
6747 return;
6748 }
8fb3e588 6749
388a8562
MS
6750 /* Reverse stepping through solib trampolines. */
6751
6752 if (execution_direction == EXEC_REVERSE
16c381f0 6753 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
6754 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6755 || (ecs->stop_func_start == 0
6756 && in_solib_dynsym_resolve_code (stop_pc))))
6757 {
6758 /* Any solib trampoline code can be handled in reverse
6759 by simply continuing to single-step. We have already
6760 executed the solib function (backwards), and a few
6761 steps will take us back through the trampoline to the
6762 caller. */
6763 keep_going (ecs);
6764 return;
6765 }
6766
16c381f0 6767 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 6768 {
b2175913
MS
6769 /* We're doing a "next".
6770
6771 Normal (forward) execution: set a breakpoint at the
6772 callee's return address (the address at which the caller
6773 will resume).
6774
6775 Reverse (backward) execution: set the step-resume
6776 breakpoint at the start of the function that we just
6777 stepped into (backwards), and continue to there. When we
6130d0b7 6778 get there, we'll need to single-step back to the caller. */
b2175913
MS
6779
6780 if (execution_direction == EXEC_REVERSE)
6781 {
acf9414f
JK
6782 /* If we're already at the start of the function, we've either
6783 just stepped backward into a single instruction function,
6784 or stepped back out of a signal handler to the first instruction
6785 of the function. Just keep going, which will single-step back
6786 to the caller. */
58c48e72 6787 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f 6788 {
acf9414f 6789 /* Normal function call return (static or dynamic). */
51abb421 6790 symtab_and_line sr_sal;
acf9414f
JK
6791 sr_sal.pc = ecs->stop_func_start;
6792 sr_sal.pspace = get_frame_program_space (frame);
6793 insert_step_resume_breakpoint_at_sal (gdbarch,
6794 sr_sal, null_frame_id);
6795 }
b2175913
MS
6796 }
6797 else
568d6575 6798 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6799
8567c30f
AC
6800 keep_going (ecs);
6801 return;
6802 }
a53c66de 6803
95918acb 6804 /* If we are in a function call trampoline (a stub between the
dda83cd7
SM
6805 calling routine and the real function), locate the real
6806 function. That's what tells us (a) whether we want to step
6807 into it at all, and (b) what prologue we want to run to the
6808 end of, if we do step into it. */
568d6575 6809 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 6810 if (real_stop_pc == 0)
568d6575 6811 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
6812 if (real_stop_pc != 0)
6813 ecs->stop_func_start = real_stop_pc;
8fb3e588 6814
db5f024e 6815 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9 6816 {
51abb421 6817 symtab_and_line sr_sal;
1b2bfbb9 6818 sr_sal.pc = ecs->stop_func_start;
6c95b8df 6819 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 6820
a6d9a66e
UW
6821 insert_step_resume_breakpoint_at_sal (gdbarch,
6822 sr_sal, null_frame_id);
8fb3e588
AC
6823 keep_going (ecs);
6824 return;
1b2bfbb9
RC
6825 }
6826
95918acb 6827 /* If we have line number information for the function we are
1bfeeb0f
JL
6828 thinking of stepping into and the function isn't on the skip
6829 list, step into it.
95918acb 6830
dda83cd7
SM
6831 If there are several symtabs at that PC (e.g. with include
6832 files), we just want to know whether *any* of them have line
6833 numbers. find_pc_line handles this. */
95918acb
AC
6834 {
6835 struct symtab_and_line tmp_sal;
8fb3e588 6836
95918acb 6837 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 6838 if (tmp_sal.line != 0
85817405 6839 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4a4c04f1
BE
6840 tmp_sal)
6841 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
95918acb 6842 {
b2175913 6843 if (execution_direction == EXEC_REVERSE)
568d6575 6844 handle_step_into_function_backward (gdbarch, ecs);
b2175913 6845 else
568d6575 6846 handle_step_into_function (gdbarch, ecs);
95918acb
AC
6847 return;
6848 }
6849 }
6850
6851 /* If we have no line number and the step-stop-if-no-debug is
dda83cd7
SM
6852 set, we stop the step so that the user has a chance to switch
6853 to assembly mode. */
16c381f0 6854 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 6855 && step_stop_if_no_debug)
95918acb 6856 {
bdc36728 6857 end_stepping_range (ecs);
95918acb
AC
6858 return;
6859 }
6860
b2175913
MS
6861 if (execution_direction == EXEC_REVERSE)
6862 {
acf9414f
JK
6863 /* If we're already at the start of the function, we've either just
6864 stepped backward into a single instruction function without line
6865 number info, or stepped back out of a signal handler to the first
6866 instruction of the function without line number info. Just keep
6867 going, which will single-step back to the caller. */
6868 if (ecs->stop_func_start != stop_pc)
6869 {
6870 /* Set a breakpoint at callee's start address.
6871 From there we can step once and be back in the caller. */
51abb421 6872 symtab_and_line sr_sal;
acf9414f
JK
6873 sr_sal.pc = ecs->stop_func_start;
6874 sr_sal.pspace = get_frame_program_space (frame);
6875 insert_step_resume_breakpoint_at_sal (gdbarch,
6876 sr_sal, null_frame_id);
6877 }
b2175913
MS
6878 }
6879 else
6880 /* Set a breakpoint at callee's return address (the address
6881 at which the caller will resume). */
568d6575 6882 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6883
95918acb 6884 keep_going (ecs);
488f131b 6885 return;
488f131b 6886 }
c906108c 6887
fdd654f3
MS
6888 /* Reverse stepping through solib trampolines. */
6889
6890 if (execution_direction == EXEC_REVERSE
16c381f0 6891 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3 6892 {
f2ffa92b
PA
6893 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6894
fdd654f3
MS
6895 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6896 || (ecs->stop_func_start == 0
6897 && in_solib_dynsym_resolve_code (stop_pc)))
6898 {
6899 /* Any solib trampoline code can be handled in reverse
6900 by simply continuing to single-step. We have already
6901 executed the solib function (backwards), and a few
6902 steps will take us back through the trampoline to the
6903 caller. */
6904 keep_going (ecs);
6905 return;
6906 }
6907 else if (in_solib_dynsym_resolve_code (stop_pc))
6908 {
6909 /* Stepped backward into the solib dynsym resolver.
6910 Set a breakpoint at its start and continue, then
6911 one more step will take us out. */
51abb421 6912 symtab_and_line sr_sal;
fdd654f3 6913 sr_sal.pc = ecs->stop_func_start;
9d1807c3 6914 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
6915 insert_step_resume_breakpoint_at_sal (gdbarch,
6916 sr_sal, null_frame_id);
6917 keep_going (ecs);
6918 return;
6919 }
6920 }
6921
8c95582d
AB
6922 /* This always returns the sal for the inner-most frame when we are in a
6923 stack of inlined frames, even if GDB actually believes that it is in a
6924 more outer frame. This is checked for below by calls to
6925 inline_skipped_frames. */
f2ffa92b 6926 stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7ed0fe66 6927
1b2bfbb9
RC
6928 /* NOTE: tausq/2004-05-24: This if block used to be done before all
6929 the trampoline processing logic, however, there are some trampolines
6930 that have no names, so we should do trampoline handling first. */
16c381f0 6931 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7ed0fe66 6932 && ecs->stop_func_name == NULL
2afb61aa 6933 && stop_pc_sal.line == 0)
1b2bfbb9 6934 {
1eb8556f 6935 infrun_debug_printf ("stepped into undebuggable function");
527159b7 6936
1b2bfbb9 6937 /* The inferior just stepped into, or returned to, an
dda83cd7
SM
6938 undebuggable function (where there is no debugging information
6939 and no line number corresponding to the address where the
6940 inferior stopped). Since we want to skip this kind of code,
6941 we keep going until the inferior returns from this
6942 function - unless the user has asked us not to (via
6943 set step-mode) or we no longer know how to get back
6944 to the call site. */
14e60db5 6945 if (step_stop_if_no_debug
c7ce8faa 6946 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
6947 {
6948 /* If we have no line number and the step-stop-if-no-debug
6949 is set, we stop the step so that the user has a chance to
6950 switch to assembly mode. */
bdc36728 6951 end_stepping_range (ecs);
1b2bfbb9
RC
6952 return;
6953 }
6954 else
6955 {
6956 /* Set a breakpoint at callee's return address (the address
6957 at which the caller will resume). */
568d6575 6958 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
6959 keep_going (ecs);
6960 return;
6961 }
6962 }
6963
16c381f0 6964 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
6965 {
6966 /* It is stepi or nexti. We always want to stop stepping after
dda83cd7 6967 one instruction. */
1eb8556f 6968 infrun_debug_printf ("stepi/nexti");
bdc36728 6969 end_stepping_range (ecs);
1b2bfbb9
RC
6970 return;
6971 }
6972
2afb61aa 6973 if (stop_pc_sal.line == 0)
488f131b
JB
6974 {
6975 /* We have no line number information. That means we stop
dda83cd7
SM
6976 stepping (does this always happen right after one instruction,
6977 when we do "s" in a function with no line numbers,
6978 or can this happen as a result of a return or longjmp?). */
1eb8556f 6979 infrun_debug_printf ("no line number info");
bdc36728 6980 end_stepping_range (ecs);
488f131b
JB
6981 return;
6982 }
c906108c 6983
edb3359d
DJ
6984 /* Look for "calls" to inlined functions, part one. If the inline
6985 frame machinery detected some skipped call sites, we have entered
6986 a new inline function. */
6987
6988 if (frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 6989 ecs->event_thread->control.step_frame_id)
00431a78 6990 && inline_skipped_frames (ecs->event_thread))
edb3359d 6991 {
1eb8556f 6992 infrun_debug_printf ("stepped into inlined function");
edb3359d 6993
51abb421 6994 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
edb3359d 6995
16c381f0 6996 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
6997 {
6998 /* For "step", we're going to stop. But if the call site
6999 for this inlined function is on the same source line as
7000 we were previously stepping, go down into the function
7001 first. Otherwise stop at the call site. */
7002
7003 if (call_sal.line == ecs->event_thread->current_line
7004 && call_sal.symtab == ecs->event_thread->current_symtab)
4a4c04f1
BE
7005 {
7006 step_into_inline_frame (ecs->event_thread);
7007 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
7008 {
7009 keep_going (ecs);
7010 return;
7011 }
7012 }
edb3359d 7013
bdc36728 7014 end_stepping_range (ecs);
edb3359d
DJ
7015 return;
7016 }
7017 else
7018 {
7019 /* For "next", we should stop at the call site if it is on a
7020 different source line. Otherwise continue through the
7021 inlined function. */
7022 if (call_sal.line == ecs->event_thread->current_line
7023 && call_sal.symtab == ecs->event_thread->current_symtab)
7024 keep_going (ecs);
7025 else
bdc36728 7026 end_stepping_range (ecs);
edb3359d
DJ
7027 return;
7028 }
7029 }
7030
7031 /* Look for "calls" to inlined functions, part two. If we are still
7032 in the same real function we were stepping through, but we have
7033 to go further up to find the exact frame ID, we are stepping
7034 through a more inlined call beyond its call site. */
7035
7036 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7037 && !frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 7038 ecs->event_thread->control.step_frame_id)
edb3359d 7039 && stepped_in_from (get_current_frame (),
16c381f0 7040 ecs->event_thread->control.step_frame_id))
edb3359d 7041 {
1eb8556f 7042 infrun_debug_printf ("stepping through inlined function");
edb3359d 7043
4a4c04f1
BE
7044 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
7045 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
edb3359d
DJ
7046 keep_going (ecs);
7047 else
bdc36728 7048 end_stepping_range (ecs);
edb3359d
DJ
7049 return;
7050 }
7051
8c95582d 7052 bool refresh_step_info = true;
f2ffa92b 7053 if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
4e1c45ea
PA
7054 && (ecs->event_thread->current_line != stop_pc_sal.line
7055 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b 7056 {
8c95582d
AB
7057 if (stop_pc_sal.is_stmt)
7058 {
7059 /* We are at the start of a different line. So stop. Note that
7060 we don't stop if we step into the middle of a different line.
7061 That is said to make things like for (;;) statements work
7062 better. */
1eb8556f 7063 infrun_debug_printf ("stepped to a different line");
8c95582d
AB
7064 end_stepping_range (ecs);
7065 return;
7066 }
7067 else if (frame_id_eq (get_frame_id (get_current_frame ()),
7068 ecs->event_thread->control.step_frame_id))
7069 {
7070 /* We are at the start of a different line, however, this line is
7071 not marked as a statement, and we have not changed frame. We
7072 ignore this line table entry, and continue stepping forward,
7073 looking for a better place to stop. */
7074 refresh_step_info = false;
1eb8556f
SM
7075 infrun_debug_printf ("stepped to a different line, but "
7076 "it's not the start of a statement");
8c95582d 7077 }
488f131b 7078 }
c906108c 7079
488f131b 7080 /* We aren't done stepping.
c906108c 7081
488f131b
JB
7082 Optimize by setting the stepping range to the line.
7083 (We might not be in the original line, but if we entered a
7084 new line in mid-statement, we continue stepping. This makes
8c95582d
AB
7085 things like for(;;) statements work better.)
7086
7087 If we entered a SAL that indicates a non-statement line table entry,
7088 then we update the stepping range, but we don't update the step info,
7089 which includes things like the line number we are stepping away from.
7090 This means we will stop when we find a line table entry that is marked
7091 as is-statement, even if it matches the non-statement one we just
7092 stepped into. */
c906108c 7093
16c381f0
JK
7094 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7095 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 7096 ecs->event_thread->control.may_range_step = 1;
8c95582d
AB
7097 if (refresh_step_info)
7098 set_step_info (ecs->event_thread, frame, stop_pc_sal);
488f131b 7099
1eb8556f 7100 infrun_debug_printf ("keep going");
488f131b 7101 keep_going (ecs);
104c1213
JM
7102}
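
/* In short, process_event_stop_test above works in two phases.  First
   the bpstat verdict is acted on: longjmp and exception resume,
   step-resume breakpoints, and noisy or silent stops, plus switching
   back to the stepped thread when another thread reported the event.
   Then the source-level stepping rules run: stay inside the current
   step range, step through runtime-loader and trampoline code, decide
   whether to step into a subroutine, handle inlined frames, and
   finally stop once a new is-stmt line is reached -- otherwise reset
   the step range to the new line and keep going.  */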
7103
c447ac0b
PA
7104/* In all-stop mode, if we're currently stepping but have stopped in
7105 some other thread, we may need to switch back to the stepped
7106 thread. Returns true we set the inferior running, false if we left
7107 it stopped (and the event needs further processing). */
7108
c4464ade 7109static bool
c447ac0b
PA
7110switch_back_to_stepped_thread (struct execution_control_state *ecs)
7111{
fbea99ea 7112 if (!target_is_non_stop_p ())
c447ac0b 7113 {
99619bea
PA
7114 struct thread_info *stepping_thread;
7115
7116 /* If any thread is blocked on some internal breakpoint, and we
7117 simply need to step over that breakpoint to get it going
7118 again, do that first. */
7119
7120 /* However, if we see an event for the stepping thread, then we
7121 know all other threads have been moved past their breakpoints
7122 already. Let the caller check whether the step is finished,
7123 etc., before deciding to move it past a breakpoint. */
7124 if (ecs->event_thread->control.step_range_end != 0)
c4464ade 7125 return false;
99619bea
PA
7126
7127 /* Check if the current thread is blocked on an incomplete
7128 step-over, interrupted by a random signal. */
7129 if (ecs->event_thread->control.trap_expected
7130 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
c447ac0b 7131 {
1eb8556f
SM
7132 infrun_debug_printf
7133 ("need to finish step-over of [%s]",
7134 target_pid_to_str (ecs->event_thread->ptid).c_str ());
99619bea 7135 keep_going (ecs);
c4464ade 7136 return true;
99619bea 7137 }
2adfaa28 7138
99619bea
PA
7139 /* Check if the current thread is blocked by a single-step
7140 breakpoint of another thread. */
7141 if (ecs->hit_singlestep_breakpoint)
7142 {
1eb8556f
SM
7143 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
7144 target_pid_to_str (ecs->ptid).c_str ());
99619bea 7145 keep_going (ecs);
c4464ade 7146 return true;
99619bea
PA
7147 }
7148
4d9d9d04
PA
7149 /* If this thread needs yet another step-over (e.g., stepping
7150 through a delay slot), do it first before moving on to
7151 another thread. */
7152 if (thread_still_needs_step_over (ecs->event_thread))
7153 {
1eb8556f
SM
7154 infrun_debug_printf
7155 ("thread [%s] still needs step-over",
7156 target_pid_to_str (ecs->event_thread->ptid).c_str ());
4d9d9d04 7157 keep_going (ecs);
c4464ade 7158 return true;
4d9d9d04 7159 }
70509625 7160
483805cf
PA
7161 /* If scheduler locking applies even if not stepping, there's no
7162 need to walk over threads. Above we've checked whether the
7163 current thread is stepping. If some other thread not the
7164 event thread is stepping, then it must be that scheduler
7165 locking is not in effect. */
856e7dd6 7166 if (schedlock_applies (ecs->event_thread))
c4464ade 7167 return false;
483805cf 7168
4d9d9d04
PA
7169 /* Otherwise, we no longer expect a trap in the current thread.
7170 Clear the trap_expected flag before switching back -- this is
7171 what keep_going does as well, if we call it. */
7172 ecs->event_thread->control.trap_expected = 0;
7173
7174 /* Likewise, clear the signal if it should not be passed. */
7175 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7176 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7177
7178 /* Do all pending step-overs before actually proceeding with
483805cf 7179 step/next/etc. */
4d9d9d04
PA
7180 if (start_step_over ())
7181 {
7182 prepare_to_wait (ecs);
c4464ade 7183 return true;
4d9d9d04
PA
7184 }
7185
7186 /* Look for the stepping/nexting thread. */
483805cf 7187 stepping_thread = NULL;
4d9d9d04 7188
08036331 7189 for (thread_info *tp : all_non_exited_threads ())
dda83cd7 7190 {
f3f8ece4
PA
7191 switch_to_thread_no_regs (tp);
7192
fbea99ea
PA
7193 /* Ignore threads of processes the caller is not
7194 resuming. */
483805cf 7195 if (!sched_multi
5b6d1e4f
PA
7196 && (tp->inf->process_target () != ecs->target
7197 || tp->inf->pid != ecs->ptid.pid ()))
483805cf
PA
7198 continue;
7199
7200 /* When stepping over a breakpoint, we lock all threads
7201 except the one that needs to move past the breakpoint.
7202 If a non-event thread has this set, the "incomplete
7203 step-over" check above should have caught it earlier. */
372316f1
PA
7204 if (tp->control.trap_expected)
7205 {
7206 internal_error (__FILE__, __LINE__,
7207 "[%s] has inconsistent state: "
7208 "trap_expected=%d\n",
a068643d 7209 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
7210 tp->control.trap_expected);
7211 }
483805cf
PA
7212
7213 /* Did we find the stepping thread? */
7214 if (tp->control.step_range_end)
7215 {
7216 /* Yep. There should only be one, though. */
7217 gdb_assert (stepping_thread == NULL);
7218
7219 /* The event thread is handled at the top, before we
7220 enter this loop. */
7221 gdb_assert (tp != ecs->event_thread);
7222
7223 /* If some thread other than the event thread is
7224 stepping, then scheduler locking can't be in effect,
7225 otherwise we wouldn't have resumed the current event
7226 thread in the first place. */
856e7dd6 7227 gdb_assert (!schedlock_applies (tp));
483805cf
PA
7228
7229 stepping_thread = tp;
7230 }
99619bea
PA
7231 }
7232
483805cf 7233 if (stepping_thread != NULL)
99619bea 7234 {
1eb8556f 7235 infrun_debug_printf ("switching back to stepped thread");
c447ac0b 7236
2ac7589c
PA
7237 if (keep_going_stepped_thread (stepping_thread))
7238 {
7239 prepare_to_wait (ecs);
c4464ade 7240 return true;
2ac7589c
PA
7241 }
7242 }
f3f8ece4
PA
7243
7244 switch_to_thread (ecs->event_thread);
2ac7589c 7245 }
2adfaa28 7246
c4464ade 7247 return false;
2ac7589c 7248}
2adfaa28 7249
2ac7589c
PA
7250/* Set a previously stepped thread back to stepping. Returns true on
7251 success, false if the resume is not possible (e.g., the thread
7252 vanished). */
7253
c4464ade 7254static bool
2ac7589c
PA
7255keep_going_stepped_thread (struct thread_info *tp)
7256{
7257 struct frame_info *frame;
2ac7589c
PA
7258 struct execution_control_state ecss;
7259 struct execution_control_state *ecs = &ecss;
2adfaa28 7260
2ac7589c
PA
7261 /* If the stepping thread exited, then don't try to switch back and
7262 resume it, which could fail in several different ways depending
7263 on the target. Instead, just keep going.
2adfaa28 7264
2ac7589c
PA
7265 We can find a stepping dead thread in the thread list in two
7266 cases:
2adfaa28 7267
2ac7589c
PA
7268 - The target supports thread exit events, and when the target
7269 tries to delete the thread from the thread list, inferior_ptid
7270 pointed at the exiting thread. In such case, calling
7271 delete_thread does not really remove the thread from the list;
7272 instead, the thread is left listed, with 'exited' state.
64ce06e4 7273
2ac7589c
PA
7274 - The target's debug interface does not support thread exit
7275 events, and so we have no idea whatsoever if the previously
7276 stepping thread is still alive. For that reason, we need to
7277 synchronously query the target now. */
2adfaa28 7278
00431a78 7279 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
2ac7589c 7280 {
1eb8556f
SM
7281 infrun_debug_printf ("not resuming previously stepped thread, it has "
7282 "vanished");
2ac7589c 7283
00431a78 7284 delete_thread (tp);
c4464ade 7285 return false;
c447ac0b 7286 }
2ac7589c 7287
1eb8556f 7288 infrun_debug_printf ("resuming previously stepped thread");
2ac7589c
PA
7289
7290 reset_ecs (ecs, tp);
00431a78 7291 switch_to_thread (tp);
2ac7589c 7292
f2ffa92b 7293 tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
2ac7589c 7294 frame = get_current_frame ();
2ac7589c
PA
7295
7296 /* If the PC of the thread we were trying to single-step has
7297 changed, then that thread has trapped or been signaled, but the
7298 event has not been reported to GDB yet. Re-poll the target
7299 looking for this particular thread's event (i.e. temporarily
7300 enable schedlock) by:
7301
7302 - setting a break at the current PC
7303 - resuming that particular thread, only (by setting trap
7304 expected)
7305
7306 This prevents us continuously moving the single-step breakpoint
7307 forward, one instruction at a time, overstepping. */
7308
f2ffa92b 7309 if (tp->suspend.stop_pc != tp->prev_pc)
2ac7589c
PA
7310 {
7311 ptid_t resume_ptid;
7312
1eb8556f
SM
7313 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
7314 paddress (target_gdbarch (), tp->prev_pc),
7315 paddress (target_gdbarch (), tp->suspend.stop_pc));
2ac7589c
PA
7316
7317 /* Clear the info of the previous step-over, as it's no longer
7318 valid (if the thread was trying to step over a breakpoint, it
7319 has already succeeded). It's what keep_going would do too,
7320 if we called it. Do this before trying to insert the sss
7321 breakpoint, otherwise if we were previously trying to step
7322 over this exact address in another thread, the breakpoint is
7323 skipped. */
7324 clear_step_over_info ();
7325 tp->control.trap_expected = 0;
7326
7327 insert_single_step_breakpoint (get_frame_arch (frame),
7328 get_frame_address_space (frame),
f2ffa92b 7329 tp->suspend.stop_pc);
2ac7589c 7330
719546c4 7331 tp->resumed = true;
fbea99ea 7332 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
c4464ade 7333 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
2ac7589c
PA
7334 }
7335 else
7336 {
1eb8556f 7337 infrun_debug_printf ("expected thread still hasn't advanced");
2ac7589c
PA
7338
7339 keep_going_pass_signal (ecs);
7340 }
c4464ade
SM
7341
7342 return true;
c447ac0b
PA
7343}
7344
8b061563
PA
7345/* Is thread TP in the middle of (software or hardware)
7346 single-stepping? (Note the result of this function must never be
7347 passed directly as target_resume's STEP parameter.) */
104c1213 7348
c4464ade 7349static bool
b3444185 7350currently_stepping (struct thread_info *tp)
a7212384 7351{
8358c15c
JK
7352 return ((tp->control.step_range_end
7353 && tp->control.step_resume_breakpoint == NULL)
7354 || tp->control.trap_expected
af48d08f 7355 || tp->stepped_breakpoint
8358c15c 7356 || bpstat_should_step ());
a7212384
UW
7357}
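
/* Concretely, currently_stepping returns true while a source-level
   step is in progress (a step range is set and no step-resume
   breakpoint has taken over), while stepping over a breakpoint
   (trap_expected), right after stepping a permanent breakpoint, or
   when software watchpoints force single-stepping
   (bpstat_should_step).  */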
7358
b2175913
MS
7359/* Inferior has stepped into a subroutine call with source code that
7360 we should not step over. Do step to the first line of code in
7361 it. */
c2c6d25f
JM
7362
7363static void
568d6575
UW
7364handle_step_into_function (struct gdbarch *gdbarch,
7365 struct execution_control_state *ecs)
c2c6d25f 7366{
7e324e48
GB
7367 fill_in_stop_func (gdbarch, ecs);
7368
f2ffa92b
PA
7369 compunit_symtab *cust
7370 = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7371 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7372 ecs->stop_func_start
7373 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
c2c6d25f 7374
51abb421 7375 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
7376 /* Use the step_resume_break to step until the end of the prologue,
7377 even if that involves jumps (as it seems to on the vax under
7378 4.2). */
7379 /* If the prologue ends in the middle of a source line, continue to
7380 the end of that source line (if it is still within the function).
7381 Otherwise, just go to end of prologue. */
2afb61aa
PA
7382 if (stop_func_sal.end
7383 && stop_func_sal.pc != ecs->stop_func_start
7384 && stop_func_sal.end < ecs->stop_func_end)
7385 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 7386
2dbd5e30
KB
7387 /* Architectures which require breakpoint adjustment might not be able
7388 to place a breakpoint at the computed address. If so, the test
7389 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7390 ecs->stop_func_start to an address at which a breakpoint may be
7391 legitimately placed.
8fb3e588 7392
2dbd5e30
KB
7393 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7394 made, GDB will enter an infinite loop when stepping through
7395 optimized code consisting of VLIW instructions which contain
7396 subinstructions corresponding to different source lines. On
7397 FR-V, it's not permitted to place a breakpoint on any but the
7398 first subinstruction of a VLIW instruction. When a breakpoint is
7399 set, GDB will adjust the breakpoint address to the beginning of
7400 the VLIW instruction. Thus, we need to make the corresponding
7401 adjustment here when computing the stop address. */
8fb3e588 7402
568d6575 7403 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
7404 {
7405 ecs->stop_func_start
568d6575 7406 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 7407 ecs->stop_func_start);
2dbd5e30
KB
7408 }
7409
f2ffa92b 7410 if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
c2c6d25f
JM
7411 {
7412 /* We are already there: stop now. */
bdc36728 7413 end_stepping_range (ecs);
c2c6d25f
JM
7414 return;
7415 }
7416 else
7417 {
7418 /* Put the step-breakpoint there and go until there. */
51abb421 7419 symtab_and_line sr_sal;
c2c6d25f
JM
7420 sr_sal.pc = ecs->stop_func_start;
7421 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 7422 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 7423
c2c6d25f 7424 /* Do not specify what the fp should be when we stop since on
dda83cd7
SM
7425 some machines the prologue is where the new fp value is
7426 established. */
a6d9a66e 7427 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
7428
7429 /* And make sure stepping stops right away then. */
16c381f0 7430 ecs->event_thread->control.step_range_end
dda83cd7 7431 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
7432 }
7433 keep_going (ecs);
7434}
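/* Editor's note: illustrative sketch, not part of infrun.c.  It
   models only the placement rule used above: after the prologue is
   skipped, if the prologue ends in the middle of a source line and
   that line's end is still inside the function, aim for the end of
   the line; otherwise aim for the prologue end itself.  The names
   and the numeric PCs are invented for the example.  */

#include <assert.h>

struct sal_model { unsigned long pc; unsigned long end; };

static unsigned long
model_step_into_stop_addr (unsigned long prologue_end,
                           unsigned long func_end,
                           struct sal_model line)
{
  if (line.end != 0 && line.pc != prologue_end && line.end < func_end)
    return line.end;
  return prologue_end;
}

static void
model_step_into_stop_addr_selftest (void)
{
  struct sal_model line = { 0x1000, 0x1010 };
  assert (model_step_into_stop_addr (0x1008, 0x1100, line) == 0x1010);
  assert (model_step_into_stop_addr (0x1000, 0x1100, line) == 0x1000);
}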
d4f3574e 7435
b2175913
MS
7436/* Inferior has stepped backward into a subroutine call with source
7437 code that we should not step over. Do step to the beginning of the
7438 last line of code in it. */
7439
7440static void
568d6575
UW
7441handle_step_into_function_backward (struct gdbarch *gdbarch,
7442 struct execution_control_state *ecs)
b2175913 7443{
43f3e411 7444 struct compunit_symtab *cust;
167e4384 7445 struct symtab_and_line stop_func_sal;
b2175913 7446
7e324e48
GB
7447 fill_in_stop_func (gdbarch, ecs);
7448
f2ffa92b 7449 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7450 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7451 ecs->stop_func_start
7452 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
b2175913 7453
f2ffa92b 7454 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
b2175913
MS
7455
7456 /* OK, we're just going to keep stepping here. */
f2ffa92b 7457 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
b2175913
MS
7458 {
7459 /* We're there already. Just stop stepping now. */
bdc36728 7460 end_stepping_range (ecs);
b2175913
MS
7461 }
7462 else
7463 {
7464 /* Else just reset the step range and keep going.
7465 No step-resume breakpoint, they don't work for
7466 epilogues, which can have multiple entry paths. */
16c381f0
JK
7467 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7468 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
7469 keep_going (ecs);
7470 }
7471 return;
7472}
7473
d3169d93 7474/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
 7475 This is used both when stepping into functions and to skip over code. */
7476
7477static void
2c03e5be
PA
7478insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7479 struct symtab_and_line sr_sal,
7480 struct frame_id sr_id,
7481 enum bptype sr_type)
44cbf7b5 7482{
611c83ae
PA
7483 /* There should never be more than one step-resume or longjmp-resume
7484 breakpoint per thread, so we should never be setting a new
44cbf7b5 7485 step_resume_breakpoint when one is already active. */
8358c15c 7486 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
2c03e5be 7487 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93 7488
1eb8556f
SM
7489 infrun_debug_printf ("inserting step-resume breakpoint at %s",
7490 paddress (gdbarch, sr_sal.pc));
d3169d93 7491
8358c15c 7492 inferior_thread ()->control.step_resume_breakpoint
454dafbd 7493 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
2c03e5be
PA
7494}
7495
9da8c2a0 7496void
2c03e5be
PA
7497insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7498 struct symtab_and_line sr_sal,
7499 struct frame_id sr_id)
7500{
7501 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7502 sr_sal, sr_id,
7503 bp_step_resume);
44cbf7b5 7504}
7ce450bd 7505
2c03e5be
PA
7506/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7507 This is used to skip a potential signal handler.
7ce450bd 7508
14e60db5
DJ
7509 This is called with the interrupted function's frame. The signal
7510 handler, when it returns, will resume the interrupted function at
7511 RETURN_FRAME.pc. */
d303a6c7
AC
7512
7513static void
2c03e5be 7514insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
d303a6c7 7515{
f4c1edd8 7516 gdb_assert (return_frame != NULL);
d303a6c7 7517
51abb421
PA
7518 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7519
7520 symtab_and_line sr_sal;
568d6575 7521 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 7522 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7523 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 7524
2c03e5be
PA
7525 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7526 get_stack_frame_id (return_frame),
7527 bp_hp_step_resume);
d303a6c7
AC
7528}
7529
2c03e5be
PA
7530/* Insert a "step-resume breakpoint" at the previous frame's PC. This
7531 is used to skip a function after stepping into it (for "next" or if
7532 the called function has no debugging information).
14e60db5
DJ
7533
7534 The current function has almost always been reached by single
7535 stepping a call or return instruction. NEXT_FRAME belongs to the
7536 current function, and the breakpoint will be set at the caller's
7537 resume address.
7538
7539 This is a separate function rather than reusing
2c03e5be 7540 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 7541 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 7542 of frame_unwind_caller_id for an example). */
14e60db5
DJ
7543
7544static void
7545insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7546{
14e60db5
DJ
7547 /* We shouldn't have gotten here if we don't know where the call site
7548 is. */
c7ce8faa 7549 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5 7550
51abb421 7551 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
14e60db5 7552
51abb421 7553 symtab_and_line sr_sal;
c7ce8faa
DJ
7554 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7555 frame_unwind_caller_pc (next_frame));
14e60db5 7556 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7557 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 7558
a6d9a66e 7559 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 7560 frame_unwind_caller_id (next_frame));
14e60db5
DJ
7561}
7562
611c83ae
PA
7563/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7564 new breakpoint at the target of a jmp_buf. The handling of
7565 longjmp-resume uses the same mechanisms used for handling
7566 "step-resume" breakpoints. */
7567
7568static void
a6d9a66e 7569insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 7570{
e81a37f7
TT
7571 /* There should never be more than one longjmp-resume breakpoint per
7572 thread, so we should never be setting a new
611c83ae 7573 longjmp_resume_breakpoint when one is already active. */
e81a37f7 7574 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
611c83ae 7575
1eb8556f
SM
7576 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
7577 paddress (gdbarch, pc));
611c83ae 7578
e81a37f7 7579 inferior_thread ()->control.exception_resume_breakpoint =
454dafbd 7580 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
611c83ae
PA
7581}
7582
186c406b
TT
7583/* Insert an exception resume breakpoint. TP is the thread throwing
7584 the exception. The block B is the block of the unwinder debug hook
7585 function. FRAME is the frame corresponding to the call to this
7586 function. SYM is the symbol of the function argument holding the
7587 target PC of the exception. */
7588
7589static void
7590insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 7591 const struct block *b,
186c406b
TT
7592 struct frame_info *frame,
7593 struct symbol *sym)
7594{
a70b8144 7595 try
186c406b 7596 {
63e43d3a 7597 struct block_symbol vsym;
186c406b
TT
7598 struct value *value;
7599 CORE_ADDR handler;
7600 struct breakpoint *bp;
7601
987012b8 7602 vsym = lookup_symbol_search_name (sym->search_name (),
de63c46b 7603 b, VAR_DOMAIN);
63e43d3a 7604 value = read_var_value (vsym.symbol, vsym.block, frame);
186c406b
TT
7605 /* If the value was optimized out, revert to the old behavior. */
7606 if (! value_optimized_out (value))
7607 {
7608 handler = value_as_address (value);
7609
1eb8556f
SM
7610 infrun_debug_printf ("exception resume at %lx",
7611 (unsigned long) handler);
186c406b
TT
7612
7613 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd
TT
7614 handler,
7615 bp_exception_resume).release ();
c70a6932
JK
7616
7617 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7618 frame = NULL;
7619
5d5658a1 7620 bp->thread = tp->global_num;
186c406b
TT
7621 inferior_thread ()->control.exception_resume_breakpoint = bp;
7622 }
7623 }
230d2906 7624 catch (const gdb_exception_error &e)
492d29ea
PA
7625 {
7626 /* We want to ignore errors here. */
7627 }
186c406b
TT
7628}
7629
28106bc2
SDJ
7630/* A helper for check_exception_resume that sets an
7631 exception-breakpoint based on a SystemTap probe. */
7632
7633static void
7634insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 7635 const struct bound_probe *probe,
28106bc2
SDJ
7636 struct frame_info *frame)
7637{
7638 struct value *arg_value;
7639 CORE_ADDR handler;
7640 struct breakpoint *bp;
7641
7642 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7643 if (!arg_value)
7644 return;
7645
7646 handler = value_as_address (arg_value);
7647
1eb8556f
SM
7648 infrun_debug_printf ("exception resume at %s",
7649 paddress (probe->objfile->arch (), handler));
28106bc2
SDJ
7650
7651 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd 7652 handler, bp_exception_resume).release ();
5d5658a1 7653 bp->thread = tp->global_num;
28106bc2
SDJ
7654 inferior_thread ()->control.exception_resume_breakpoint = bp;
7655}
7656
186c406b
TT
7657/* This is called when an exception has been intercepted. Check to
7658 see whether the exception's destination is of interest, and if so,
7659 set an exception resume breakpoint there. */
7660
7661static void
7662check_exception_resume (struct execution_control_state *ecs,
28106bc2 7663 struct frame_info *frame)
186c406b 7664{
729662a5 7665 struct bound_probe probe;
28106bc2
SDJ
7666 struct symbol *func;
7667
7668 /* First see if this exception unwinding breakpoint was set via a
7669 SystemTap probe point. If so, the probe has two arguments: the
7670 CFA and the HANDLER. We ignore the CFA, extract the handler, and
7671 set a breakpoint there. */
6bac7473 7672 probe = find_probe_by_pc (get_frame_pc (frame));
935676c9 7673 if (probe.prob)
28106bc2 7674 {
729662a5 7675 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
7676 return;
7677 }
7678
7679 func = get_frame_function (frame);
7680 if (!func)
7681 return;
186c406b 7682
a70b8144 7683 try
186c406b 7684 {
3977b71f 7685 const struct block *b;
8157b174 7686 struct block_iterator iter;
186c406b
TT
7687 struct symbol *sym;
7688 int argno = 0;
7689
7690 /* The exception breakpoint is a thread-specific breakpoint on
7691 the unwinder's debug hook, declared as:
7692
7693 void _Unwind_DebugHook (void *cfa, void *handler);
7694
7695 The CFA argument indicates the frame to which control is
7696 about to be transferred. HANDLER is the destination PC.
7697
7698 We ignore the CFA and set a temporary breakpoint at HANDLER.
7699 This is not extremely efficient but it avoids issues in gdb
7700 with computing the DWARF CFA, and it also works even in weird
7701 cases such as throwing an exception from inside a signal
7702 handler. */
7703
7704 b = SYMBOL_BLOCK_VALUE (func);
7705 ALL_BLOCK_SYMBOLS (b, iter, sym)
7706 {
7707 if (!SYMBOL_IS_ARGUMENT (sym))
7708 continue;
7709
7710 if (argno == 0)
7711 ++argno;
7712 else
7713 {
7714 insert_exception_resume_breakpoint (ecs->event_thread,
7715 b, frame, sym);
7716 break;
7717 }
7718 }
7719 }
230d2906 7720 catch (const gdb_exception_error &e)
492d29ea
PA
7721 {
7722 }
186c406b
TT
7723}
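/* Editor's note: illustrative sketch, not part of infrun.c or of any
   particular unwinder.  It shows, in simplified form, the shape of
   the hook the code above relies on: the runtime calls an empty,
   noinline function with the CFA and the handler PC just before
   transferring control, which gives the debugger a stable address to
   breakpoint and a place to read HANDLER from.  Real unwinders
   (e.g. libgcc) differ in detail; this is only an approximation.  */

static void __attribute__ ((noinline, used))
example_unwind_debug_hook (void *cfa, void *handler)
{
  /* Empty asm so the call is not optimized away.  */
  __asm__ ("");
  (void) cfa;
  (void) handler;
}

static void
example_transfer_to_landing_pad (void *cfa, void *handler)
{
  /* A debugger breakpointing example_unwind_debug_hook can extract
     HANDLER (the second argument) and plant a momentary breakpoint
     there, as insert_exception_resume_breakpoint does above for
     _Unwind_DebugHook.  */
  example_unwind_debug_hook (cfa, handler);
  /* ... the actual jump to HANDLER would follow here ...  */
}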
7724
104c1213 7725static void
22bcd14b 7726stop_waiting (struct execution_control_state *ecs)
104c1213 7727{
1eb8556f 7728 infrun_debug_printf ("stop_waiting");
527159b7 7729
cd0fc7c3
SS
7730 /* Let callers know we don't want to wait for the inferior anymore. */
7731 ecs->wait_some_more = 0;
fbea99ea 7732
53cccef1 7733 /* If all-stop, but there exists a non-stop target, stop all
fbea99ea 7734 threads now that we're presenting the stop to the user. */
53cccef1 7735 if (!non_stop && exists_non_stop_target ())
fbea99ea 7736 stop_all_threads ();
cd0fc7c3
SS
7737}
7738
4d9d9d04
PA
7739/* Like keep_going, but passes the signal to the inferior, even if the
7740 signal is set to nopass. */
d4f3574e
SS
7741
7742static void
4d9d9d04 7743keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 7744{
d7e15655 7745 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
372316f1 7746 gdb_assert (!ecs->event_thread->resumed);
4d9d9d04 7747
d4f3574e 7748 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b 7749 ecs->event_thread->prev_pc
fc75c28b 7750 = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));
d4f3574e 7751
4d9d9d04 7752 if (ecs->event_thread->control.trap_expected)
d4f3574e 7753 {
4d9d9d04
PA
7754 struct thread_info *tp = ecs->event_thread;
7755
1eb8556f
SM
7756 infrun_debug_printf ("%s has trap_expected set, "
7757 "resuming to collect trap",
7758 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04 7759
a9ba6bae
PA
7760 /* We haven't yet gotten our trap, and either: intercepted a
7761 non-signal event (e.g., a fork); or took a signal which we
7762 are supposed to pass through to the inferior. Simply
7763 continue. */
64ce06e4 7764 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e 7765 }
372316f1
PA
7766 else if (step_over_info_valid_p ())
7767 {
7768 /* Another thread is stepping over a breakpoint in-line. If
7769 this thread needs a step-over too, queue the request. In
7770 either case, this resume must be deferred for later. */
7771 struct thread_info *tp = ecs->event_thread;
7772
7773 if (ecs->hit_singlestep_breakpoint
7774 || thread_still_needs_step_over (tp))
7775 {
1eb8556f
SM
7776 infrun_debug_printf ("step-over already in progress: "
7777 "step-over for %s deferred",
7778 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
7779 thread_step_over_chain_enqueue (tp);
7780 }
7781 else
7782 {
1eb8556f
SM
7783 infrun_debug_printf ("step-over in progress: resume of %s deferred",
7784 target_pid_to_str (tp->ptid).c_str ());
372316f1 7785 }
372316f1 7786 }
d4f3574e
SS
7787 else
7788 {
31e77af2 7789 struct regcache *regcache = get_current_regcache ();
963f9c80
PA
7790 int remove_bp;
7791 int remove_wps;
8d297bbf 7792 step_over_what step_what;
31e77af2 7793
d4f3574e 7794 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
7795 anyway (if we got a signal, the user asked it be passed to
7796 the child)
7797 -- or --
7798 We got our expected trap, but decided we should resume from
7799 it.
d4f3574e 7800
a9ba6bae 7801 We're going to run this baby now!
d4f3574e 7802
c36b740a
VP
7803 Note that insert_breakpoints won't try to re-insert
7804 already inserted breakpoints. Therefore, we don't
7805 care if breakpoints were already inserted, or not. */
a9ba6bae 7806
31e77af2
PA
7807 /* If we need to step over a breakpoint, and we're not using
7808 displaced stepping to do so, insert all breakpoints
7809 (watchpoints, etc.) but the one we're stepping over, step one
7810 instruction, and then re-insert the breakpoint when that step
7811 is finished. */
963f9c80 7812
6c4cfb24
PA
7813 step_what = thread_still_needs_step_over (ecs->event_thread);
7814
963f9c80 7815 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
7816 || (step_what & STEP_OVER_BREAKPOINT));
7817 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 7818
cb71640d
PA
7819 /* We can't use displaced stepping if we need to step past a
7820 watchpoint. The instruction copied to the scratch pad would
7821 still trigger the watchpoint. */
7822 if (remove_bp
3fc8eb30 7823 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 7824 {
a01bda52 7825 set_step_over_info (regcache->aspace (),
21edc42f
YQ
7826 regcache_read_pc (regcache), remove_wps,
7827 ecs->event_thread->global_num);
45e8c884 7828 }
963f9c80 7829 else if (remove_wps)
21edc42f 7830 set_step_over_info (NULL, 0, remove_wps, -1);
372316f1
PA
7831
7832 /* If we now need to do an in-line step-over, we need to stop
7833 all other threads. Note this must be done before
7834 insert_breakpoints below, because that removes the breakpoint
7835 we're about to step over, otherwise other threads could miss
7836 it. */
fbea99ea 7837 if (step_over_info_valid_p () && target_is_non_stop_p ())
372316f1 7838 stop_all_threads ();
abbb1732 7839
31e77af2 7840 /* Stop stepping if inserting breakpoints fails. */
a70b8144 7841 try
31e77af2
PA
7842 {
7843 insert_breakpoints ();
7844 }
230d2906 7845 catch (const gdb_exception_error &e)
31e77af2
PA
7846 {
7847 exception_print (gdb_stderr, e);
22bcd14b 7848 stop_waiting (ecs);
bdf2a94a 7849 clear_step_over_info ();
31e77af2 7850 return;
d4f3574e
SS
7851 }
7852
963f9c80 7853 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 7854
64ce06e4 7855 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e
SS
7856 }
7857
488f131b 7858 prepare_to_wait (ecs);
d4f3574e
SS
7859}
7860
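/* Editor's note: illustrative sketch, not part of infrun.c.  It
   reduces the step-over decision made above to its boolean core:
   what must be stepped past on this resume, and whether that forces
   an in-line step-over instead of a displaced one.  The flag values
   and names are invented for the example.  */

enum
{
  MODEL_STEP_OVER_BREAKPOINT = 1,
  MODEL_STEP_OVER_WATCHPOINT = 2
};

struct model_step_over_plan
{
  int remove_bp;    /* Step past a breakpoint on this resume.  */
  int remove_wps;   /* Step past a watchpoint on this resume.  */
  int inline_step;  /* Must be done in-line (other threads stopped).  */
};

static struct model_step_over_plan
model_plan_step_over (int step_what, int hit_singlestep_bp,
                      int displaced_possible)
{
  struct model_step_over_plan p;

  p.remove_bp = (hit_singlestep_bp
                 || (step_what & MODEL_STEP_OVER_BREAKPOINT) != 0);
  p.remove_wps = (step_what & MODEL_STEP_OVER_WATCHPOINT) != 0;
  /* Displaced stepping cannot be used to step past a watchpoint,
     since the copied instruction would still trigger it, so such a
     step-over (or one without displaced-stepping support) has to be
     done in-line.  */
  p.inline_step = p.remove_bp && (p.remove_wps || !displaced_possible);
  return p;
}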
4d9d9d04
PA
7861/* Called when we should continue running the inferior, because the
7862 current event doesn't cause a user visible stop. This does the
7863 resuming part; waiting for the next event is done elsewhere. */
7864
7865static void
7866keep_going (struct execution_control_state *ecs)
7867{
7868 if (ecs->event_thread->control.trap_expected
7869 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
7870 ecs->event_thread->control.trap_expected = 0;
7871
7872 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7873 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7874 keep_going_pass_signal (ecs);
7875}
7876
104c1213
JM
7877/* This function normally comes after a resume, before
7878 handle_inferior_event exits. It takes care of any last bits of
7879 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 7880
104c1213
JM
7881static void
7882prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 7883{
1eb8556f 7884 infrun_debug_printf ("prepare_to_wait");
104c1213 7885
104c1213 7886 ecs->wait_some_more = 1;
0b333c5e 7887
42bd97a6
PA
7888 /* If the target can't async, emulate it by marking the infrun event
7889 handler such that as soon as we get back to the event-loop, we
7890 immediately end up in fetch_inferior_event again calling
7891 target_wait. */
7892 if (!target_can_async_p ())
0b333c5e 7893 mark_infrun_async_event_handler ();
c906108c 7894}
11cf8741 7895
fd664c91 7896/* We are done with the step range of a step/next/si/ni command.
b57bacec 7897 Called once for each n of a "step n" operation. */
fd664c91
PA
7898
7899static void
bdc36728 7900end_stepping_range (struct execution_control_state *ecs)
fd664c91 7901{
bdc36728 7902 ecs->event_thread->control.stop_step = 1;
bdc36728 7903 stop_waiting (ecs);
fd664c91
PA
7904}
7905
33d62d64
JK
7906/* Several print_*_reason functions to print why the inferior has stopped.
7907 We always print something when the inferior exits, or receives a signal.
7908 The rest of the cases are dealt with later on in normal_stop and
7909 print_it_typical. Ideally there should be a call to one of these
 7910 print_*_reason functions from handle_inferior_event each time
22bcd14b 7911 stop_waiting is called.
33d62d64 7912
fd664c91
PA
7913 Note that we don't call these directly, instead we delegate that to
7914 the interpreters, through observers. Interpreters then call these
7915 with whatever uiout is right. */
33d62d64 7916
fd664c91
PA
7917void
7918print_end_stepping_range_reason (struct ui_out *uiout)
33d62d64 7919{
fd664c91 7920 /* For CLI-like interpreters, print nothing. */
33d62d64 7921
112e8700 7922 if (uiout->is_mi_like_p ())
fd664c91 7923 {
112e8700 7924 uiout->field_string ("reason",
fd664c91
PA
7925 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
7926 }
7927}
33d62d64 7928
fd664c91
PA
7929void
7930print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 7931{
33d62d64 7932 annotate_signalled ();
112e8700
SM
7933 if (uiout->is_mi_like_p ())
7934 uiout->field_string
7935 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
7936 uiout->text ("\nProgram terminated with signal ");
33d62d64 7937 annotate_signal_name ();
112e8700 7938 uiout->field_string ("signal-name",
2ea28649 7939 gdb_signal_to_name (siggnal));
33d62d64 7940 annotate_signal_name_end ();
112e8700 7941 uiout->text (", ");
33d62d64 7942 annotate_signal_string ();
112e8700 7943 uiout->field_string ("signal-meaning",
2ea28649 7944 gdb_signal_to_string (siggnal));
33d62d64 7945 annotate_signal_string_end ();
112e8700
SM
7946 uiout->text (".\n");
7947 uiout->text ("The program no longer exists.\n");
33d62d64
JK
7948}
7949
fd664c91
PA
7950void
7951print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 7952{
fda326dd 7953 struct inferior *inf = current_inferior ();
a068643d 7954 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
fda326dd 7955
33d62d64
JK
7956 annotate_exited (exitstatus);
7957 if (exitstatus)
7958 {
112e8700
SM
7959 if (uiout->is_mi_like_p ())
7960 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
6a831f06
PA
7961 std::string exit_code_str
7962 = string_printf ("0%o", (unsigned int) exitstatus);
7963 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
7964 plongest (inf->num), pidstr.c_str (),
7965 string_field ("exit-code", exit_code_str.c_str ()));
33d62d64
JK
7966 }
7967 else
11cf8741 7968 {
112e8700
SM
7969 if (uiout->is_mi_like_p ())
7970 uiout->field_string
7971 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6a831f06
PA
7972 uiout->message ("[Inferior %s (%s) exited normally]\n",
7973 plongest (inf->num), pidstr.c_str ());
33d62d64 7974 }
33d62d64
JK
7975}
7976
fd664c91
PA
7977void
7978print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64 7979{
f303dbd6
PA
7980 struct thread_info *thr = inferior_thread ();
7981
33d62d64
JK
7982 annotate_signal ();
7983
112e8700 7984 if (uiout->is_mi_like_p ())
f303dbd6
PA
7985 ;
7986 else if (show_thread_that_caused_stop ())
33d62d64 7987 {
f303dbd6 7988 const char *name;
33d62d64 7989
112e8700 7990 uiout->text ("\nThread ");
33eca680 7991 uiout->field_string ("thread-id", print_thread_id (thr));
f303dbd6
PA
7992
7993 name = thr->name != NULL ? thr->name : target_thread_name (thr);
7994 if (name != NULL)
7995 {
112e8700 7996 uiout->text (" \"");
33eca680 7997 uiout->field_string ("name", name);
112e8700 7998 uiout->text ("\"");
f303dbd6 7999 }
33d62d64 8000 }
f303dbd6 8001 else
112e8700 8002 uiout->text ("\nProgram");
f303dbd6 8003
112e8700
SM
8004 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
8005 uiout->text (" stopped");
33d62d64
JK
8006 else
8007 {
112e8700 8008 uiout->text (" received signal ");
8b93c638 8009 annotate_signal_name ();
112e8700
SM
8010 if (uiout->is_mi_like_p ())
8011 uiout->field_string
8012 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
8013 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8b93c638 8014 annotate_signal_name_end ();
112e8700 8015 uiout->text (", ");
8b93c638 8016 annotate_signal_string ();
112e8700 8017 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
012b3a21 8018
272bb05c
JB
8019 struct regcache *regcache = get_current_regcache ();
8020 struct gdbarch *gdbarch = regcache->arch ();
8021 if (gdbarch_report_signal_info_p (gdbarch))
8022 gdbarch_report_signal_info (gdbarch, uiout, siggnal);
8023
8b93c638 8024 annotate_signal_string_end ();
33d62d64 8025 }
112e8700 8026 uiout->text (".\n");
33d62d64 8027}
252fbfc8 8028
fd664c91
PA
8029void
8030print_no_history_reason (struct ui_out *uiout)
33d62d64 8031{
112e8700 8032 uiout->text ("\nNo more reverse-execution history.\n");
11cf8741 8033}
43ff13b4 8034
0c7e1a46
PA
8035/* Print current location without a level number, if we have changed
8036 functions or hit a breakpoint. Print source line if we have one.
8037 bpstat_print contains the logic deciding in detail what to print,
8038 based on the event(s) that just occurred. */
8039
243a9253
PA
8040static void
8041print_stop_location (struct target_waitstatus *ws)
0c7e1a46
PA
8042{
8043 int bpstat_ret;
f486487f 8044 enum print_what source_flag;
0c7e1a46
PA
8045 int do_frame_printing = 1;
8046 struct thread_info *tp = inferior_thread ();
8047
8048 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
8049 switch (bpstat_ret)
8050 {
8051 case PRINT_UNKNOWN:
8052 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
8053 should) carry around the function and does (or should) use
8054 that when doing a frame comparison. */
8055 if (tp->control.stop_step
8056 && frame_id_eq (tp->control.step_frame_id,
8057 get_frame_id (get_current_frame ()))
f2ffa92b
PA
8058 && (tp->control.step_start_function
8059 == find_pc_function (tp->suspend.stop_pc)))
0c7e1a46
PA
8060 {
8061 /* Finished step, just print source line. */
8062 source_flag = SRC_LINE;
8063 }
8064 else
8065 {
8066 /* Print location and source line. */
8067 source_flag = SRC_AND_LOC;
8068 }
8069 break;
8070 case PRINT_SRC_AND_LOC:
8071 /* Print location and source line. */
8072 source_flag = SRC_AND_LOC;
8073 break;
8074 case PRINT_SRC_ONLY:
8075 source_flag = SRC_LINE;
8076 break;
8077 case PRINT_NOTHING:
8078 /* Something bogus. */
8079 source_flag = SRC_LINE;
8080 do_frame_printing = 0;
8081 break;
8082 default:
8083 internal_error (__FILE__, __LINE__, _("Unknown value."));
8084 }
8085
8086 /* The behavior of this routine with respect to the source
8087 flag is:
8088 SRC_LINE: Print only source line
8089 LOCATION: Print only location
8090 SRC_AND_LOC: Print location and source line. */
8091 if (do_frame_printing)
8092 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
243a9253
PA
8093}
8094
243a9253
PA
8095/* See infrun.h. */
8096
8097void
4c7d57e7 8098print_stop_event (struct ui_out *uiout, bool displays)
243a9253 8099{
243a9253 8100 struct target_waitstatus last;
243a9253
PA
8101 struct thread_info *tp;
8102
5b6d1e4f 8103 get_last_target_status (nullptr, nullptr, &last);
243a9253 8104
67ad9399
TT
8105 {
8106 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
0c7e1a46 8107
67ad9399 8108 print_stop_location (&last);
243a9253 8109
67ad9399 8110 /* Display the auto-display expressions. */
4c7d57e7
TT
8111 if (displays)
8112 do_displays ();
67ad9399 8113 }
243a9253
PA
8114
8115 tp = inferior_thread ();
8116 if (tp->thread_fsm != NULL
46e3ed7f 8117 && tp->thread_fsm->finished_p ())
243a9253
PA
8118 {
8119 struct return_value_info *rv;
8120
46e3ed7f 8121 rv = tp->thread_fsm->return_value ();
243a9253
PA
8122 if (rv != NULL)
8123 print_return_value (uiout, rv);
8124 }
0c7e1a46
PA
8125}
8126
388a7084
PA
8127/* See infrun.h. */
8128
8129void
8130maybe_remove_breakpoints (void)
8131{
55f6301a 8132 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
388a7084
PA
8133 {
8134 if (remove_breakpoints ())
8135 {
223ffa71 8136 target_terminal::ours_for_output ();
388a7084
PA
8137 printf_filtered (_("Cannot remove breakpoints because "
8138 "program is no longer writable.\nFurther "
8139 "execution is probably impossible.\n"));
8140 }
8141 }
8142}
8143
4c2f2a79
PA
8144/* The execution context that just caused a normal stop. */
8145
8146struct stop_context
8147{
2d844eaf
TT
8148 stop_context ();
8149 ~stop_context ();
8150
8151 DISABLE_COPY_AND_ASSIGN (stop_context);
8152
8153 bool changed () const;
8154
4c2f2a79
PA
8155 /* The stop ID. */
8156 ULONGEST stop_id;
c906108c 8157
4c2f2a79 8158 /* The event PTID. */
c906108c 8159
4c2f2a79
PA
8160 ptid_t ptid;
8161
 8162 /* If stopped for a thread event, this is the thread that caused the
8163 stop. */
8164 struct thread_info *thread;
8165
8166 /* The inferior that caused the stop. */
8167 int inf_num;
8168};
8169
2d844eaf 8170/* Initializes a new stop context. If stopped for a thread event, this
4c2f2a79
PA
8171 takes a strong reference to the thread. */
8172
2d844eaf 8173stop_context::stop_context ()
4c2f2a79 8174{
2d844eaf
TT
8175 stop_id = get_stop_id ();
8176 ptid = inferior_ptid;
8177 inf_num = current_inferior ()->num;
4c2f2a79 8178
d7e15655 8179 if (inferior_ptid != null_ptid)
4c2f2a79
PA
8180 {
8181 /* Take a strong reference so that the thread can't be deleted
8182 yet. */
2d844eaf
TT
8183 thread = inferior_thread ();
8184 thread->incref ();
4c2f2a79
PA
8185 }
8186 else
2d844eaf 8187 thread = NULL;
4c2f2a79
PA
8188}
8189
8190/* Destroy a stop context captured by the constructor above.
 8191 Releases the strong reference to the thread as well. */
8192
2d844eaf 8193stop_context::~stop_context ()
4c2f2a79 8194{
2d844eaf
TT
8195 if (thread != NULL)
8196 thread->decref ();
4c2f2a79
PA
8197}
8198
8199/* Return true if the current context no longer matches the saved stop
8200 context. */
8201
2d844eaf
TT
8202bool
8203stop_context::changed () const
8204{
8205 if (ptid != inferior_ptid)
8206 return true;
8207 if (inf_num != current_inferior ()->num)
8208 return true;
8209 if (thread != NULL && thread->state != THREAD_STOPPED)
8210 return true;
8211 if (get_stop_id () != stop_id)
8212 return true;
8213 return false;
4c2f2a79
PA
8214}
8215
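/* Editor's note: illustrative sketch, not part of infrun.c.  It
   shows the general snapshot-and-compare pattern that stop_context
   implements for normal_stop: record a few cheap identifiers before
   running user code (the stop hook), and afterwards treat any
   difference as "the stop we were about to present is stale".  The
   field names are invented stand-ins for stop_id, ptid and the
   inferior number.  */

struct model_stop_snapshot
{
  unsigned long stop_generation;
  int thread_num;
  int inferior_num;
};

static int
model_stop_snapshot_changed (const struct model_stop_snapshot *before,
                             const struct model_stop_snapshot *after)
{
  return (before->stop_generation != after->stop_generation
          || before->thread_num != after->thread_num
          || before->inferior_num != after->inferior_num);
}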
8216/* See infrun.h. */
8217
8218int
96baa820 8219normal_stop (void)
c906108c 8220{
73b65bb0 8221 struct target_waitstatus last;
73b65bb0 8222
5b6d1e4f 8223 get_last_target_status (nullptr, nullptr, &last);
73b65bb0 8224
4c2f2a79
PA
8225 new_stop_id ();
8226
29f49a6a
PA
8227 /* If an exception is thrown from this point on, make sure to
8228 propagate GDB's knowledge of the executing state to the
8229 frontend/user running state. A QUIT is an easy exception to see
8230 here, so do this before any filtered output. */
731f534f 8231
5b6d1e4f 8232 ptid_t finish_ptid = null_ptid;
731f534f 8233
c35b1492 8234 if (!non_stop)
5b6d1e4f 8235 finish_ptid = minus_one_ptid;
e1316e60
PA
8236 else if (last.kind == TARGET_WAITKIND_SIGNALLED
8237 || last.kind == TARGET_WAITKIND_EXITED)
8238 {
8239 /* On some targets, we may still have live threads in the
8240 inferior when we get a process exit event. E.g., for
8241 "checkpoint", when the current checkpoint/fork exits,
8242 linux-fork.c automatically switches to another fork from
8243 within target_mourn_inferior. */
731f534f 8244 if (inferior_ptid != null_ptid)
5b6d1e4f 8245 finish_ptid = ptid_t (inferior_ptid.pid ());
e1316e60
PA
8246 }
8247 else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
5b6d1e4f
PA
8248 finish_ptid = inferior_ptid;
8249
8250 gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
8251 if (finish_ptid != null_ptid)
8252 {
8253 maybe_finish_thread_state.emplace
8254 (user_visible_resume_target (finish_ptid), finish_ptid);
8255 }
29f49a6a 8256
b57bacec
PA
8257 /* As we're presenting a stop, and potentially removing breakpoints,
8258 update the thread list so we can tell whether there are threads
8259 running on the target. With target remote, for example, we can
8260 only learn about new threads when we explicitly update the thread
8261 list. Do this before notifying the interpreters about signal
8262 stops, end of stepping ranges, etc., so that the "new thread"
8263 output is emitted before e.g., "Program received signal FOO",
8264 instead of after. */
8265 update_thread_list ();
8266
8267 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
76727919 8268 gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);
b57bacec 8269
c906108c
SS
8270 /* As with the notification of thread events, we want to delay
8271 notifying the user that we've switched thread context until
8272 the inferior actually stops.
8273
73b65bb0
DJ
8274 There's no point in saying anything if the inferior has exited.
8275 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
8276 "received a signal".
8277
8278 Also skip saying anything in non-stop mode. In that mode, as we
8279 don't want GDB to switch threads behind the user's back, to avoid
8280 races where the user is typing a command to apply to thread x,
8281 but GDB switches to thread y before the user finishes entering
8282 the command, fetch_inferior_event installs a cleanup to restore
8283 the current thread back to the thread the user had selected right
8284 after this event is handled, so we're not really switching, only
8285 informing of a stop. */
4f8d22e3 8286 if (!non_stop
731f534f 8287 && previous_inferior_ptid != inferior_ptid
55f6301a 8288 && target_has_execution ()
73b65bb0 8289 && last.kind != TARGET_WAITKIND_SIGNALLED
0e5bf2a8
PA
8290 && last.kind != TARGET_WAITKIND_EXITED
8291 && last.kind != TARGET_WAITKIND_NO_RESUMED)
c906108c 8292 {
0e454242 8293 SWITCH_THRU_ALL_UIS ()
3b12939d 8294 {
223ffa71 8295 target_terminal::ours_for_output ();
3b12939d 8296 printf_filtered (_("[Switching to %s]\n"),
a068643d 8297 target_pid_to_str (inferior_ptid).c_str ());
3b12939d
PA
8298 annotate_thread_changed ();
8299 }
39f77062 8300 previous_inferior_ptid = inferior_ptid;
c906108c 8301 }
c906108c 8302
0e5bf2a8
PA
8303 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
8304 {
0e454242 8305 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8306 if (current_ui->prompt_state == PROMPT_BLOCKED)
8307 {
223ffa71 8308 target_terminal::ours_for_output ();
3b12939d
PA
8309 printf_filtered (_("No unwaited-for children left.\n"));
8310 }
0e5bf2a8
PA
8311 }
8312
b57bacec 8313 /* Note: this depends on the update_thread_list call above. */
388a7084 8314 maybe_remove_breakpoints ();
c906108c 8315
c906108c
SS
8316 /* If an auto-display called a function and that got a signal,
8317 delete that auto-display to avoid an infinite recursion. */
8318
8319 if (stopped_by_random_signal)
8320 disable_current_display ();
8321
0e454242 8322 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8323 {
8324 async_enable_stdin ();
8325 }
c906108c 8326
388a7084 8327 /* Let the user/frontend see the threads as stopped. */
731f534f 8328 maybe_finish_thread_state.reset ();
388a7084
PA
8329
8330 /* Select innermost stack frame - i.e., current frame is frame 0,
8331 and current location is based on that. Handle the case where the
8332 dummy call is returning after being stopped. E.g. the dummy call
8333 previously hit a breakpoint. (If the dummy call returns
8334 normally, we won't reach here.) Do this before the stop hook is
8335 run, so that it doesn't get to see the temporary dummy frame,
8336 which is not where we'll present the stop. */
8337 if (has_stack_frames ())
8338 {
8339 if (stop_stack_dummy == STOP_STACK_DUMMY)
8340 {
8341 /* Pop the empty frame that contains the stack dummy. This
8342 also restores inferior state prior to the call (struct
8343 infcall_suspend_state). */
8344 struct frame_info *frame = get_current_frame ();
8345
8346 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
8347 frame_pop (frame);
8348 /* frame_pop calls reinit_frame_cache as the last thing it
8349 does which means there's now no selected frame. */
8350 }
8351
8352 select_frame (get_current_frame ());
8353
8354 /* Set the current source location. */
8355 set_current_sal_from_frame (get_current_frame ());
8356 }
dd7e2d2b
PA
8357
8358 /* Look up the hook_stop and run it (CLI internally handles problem
8359 of stop_command's pre-hook not existing). */
4c2f2a79
PA
8360 if (stop_command != NULL)
8361 {
2d844eaf 8362 stop_context saved_context;
4c2f2a79 8363
a70b8144 8364 try
bf469271
PA
8365 {
8366 execute_cmd_pre_hook (stop_command);
8367 }
230d2906 8368 catch (const gdb_exception &ex)
bf469271
PA
8369 {
8370 exception_fprintf (gdb_stderr, ex,
8371 "Error while running hook_stop:\n");
8372 }
4c2f2a79
PA
8373
8374 /* If the stop hook resumes the target, then there's no point in
8375 trying to notify about the previous stop; its context is
8376 gone. Likewise if the command switches thread or inferior --
8377 the observers would print a stop for the wrong
8378 thread/inferior. */
2d844eaf
TT
8379 if (saved_context.changed ())
8380 return 1;
4c2f2a79 8381 }
dd7e2d2b 8382
388a7084
PA
8383 /* Notify observers about the stop. This is where the interpreters
8384 print the stop event. */
d7e15655 8385 if (inferior_ptid != null_ptid)
76727919 8386 gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
388a7084
PA
8387 stop_print_frame);
8388 else
76727919 8389 gdb::observers::normal_stop.notify (NULL, stop_print_frame);
347bddb7 8390
243a9253
PA
8391 annotate_stopped ();
8392
55f6301a 8393 if (target_has_execution ())
48844aa6
PA
8394 {
8395 if (last.kind != TARGET_WAITKIND_SIGNALLED
fe726667
PA
8396 && last.kind != TARGET_WAITKIND_EXITED
8397 && last.kind != TARGET_WAITKIND_NO_RESUMED)
48844aa6
PA
8398 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8399 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 8400 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 8401 }
6c95b8df
PA
8402
8403 /* Try to get rid of automatically added inferiors that are no
8404 longer needed. Keeping those around slows down things linearly.
8405 Note that this never removes the current inferior. */
8406 prune_inferiors ();
4c2f2a79
PA
8407
8408 return 0;
c906108c 8409}
c906108c 8410\f
c5aa993b 8411int
96baa820 8412signal_stop_state (int signo)
c906108c 8413{
d6b48e9c 8414 return signal_stop[signo];
c906108c
SS
8415}
8416
c5aa993b 8417int
96baa820 8418signal_print_state (int signo)
c906108c
SS
8419{
8420 return signal_print[signo];
8421}
8422
c5aa993b 8423int
96baa820 8424signal_pass_state (int signo)
c906108c
SS
8425{
8426 return signal_program[signo];
8427}
8428
2455069d
UW
8429static void
8430signal_cache_update (int signo)
8431{
8432 if (signo == -1)
8433 {
a493e3e2 8434 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
8435 signal_cache_update (signo);
8436
8437 return;
8438 }
8439
8440 signal_pass[signo] = (signal_stop[signo] == 0
8441 && signal_print[signo] == 0
ab04a2af
TT
8442 && signal_program[signo] == 1
8443 && signal_catch[signo] == 0);
2455069d
UW
8444}
8445
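/* Editor's note: illustrative sketch, not part of infrun.c.  It
   restates the rule signal_cache_update applies: a signal may be
   delivered straight to the inferior, without reporting to GDB,
   only when it neither stops nor prints, "pass" is enabled, and no
   catchpoint wants it.  The helper and its checks are invented for
   the example.  */

#include <assert.h>

static int
model_signal_quiet_pass (int stop, int print, int program, int catch_count)
{
  return stop == 0 && print == 0 && program == 1 && catch_count == 0;
}

static void
model_signal_quiet_pass_selftest (void)
{
  assert (model_signal_quiet_pass (0, 0, 1, 0));   /* nostop noprint pass */
  assert (!model_signal_quiet_pass (1, 1, 1, 0));  /* stopping: GDB must see it */
  assert (!model_signal_quiet_pass (0, 0, 1, 1));  /* a catchpoint wants it */
  assert (!model_signal_quiet_pass (0, 0, 0, 0));  /* "nopass": not delivered */
}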
488f131b 8446int
7bda5e4a 8447signal_stop_update (int signo, int state)
d4f3574e
SS
8448{
8449 int ret = signal_stop[signo];
abbb1732 8450
d4f3574e 8451 signal_stop[signo] = state;
2455069d 8452 signal_cache_update (signo);
d4f3574e
SS
8453 return ret;
8454}
8455
488f131b 8456int
7bda5e4a 8457signal_print_update (int signo, int state)
d4f3574e
SS
8458{
8459 int ret = signal_print[signo];
abbb1732 8460
d4f3574e 8461 signal_print[signo] = state;
2455069d 8462 signal_cache_update (signo);
d4f3574e
SS
8463 return ret;
8464}
8465
488f131b 8466int
7bda5e4a 8467signal_pass_update (int signo, int state)
d4f3574e
SS
8468{
8469 int ret = signal_program[signo];
abbb1732 8470
d4f3574e 8471 signal_program[signo] = state;
2455069d 8472 signal_cache_update (signo);
d4f3574e
SS
8473 return ret;
8474}
8475
ab04a2af
TT
8476/* Update the global 'signal_catch' from INFO and notify the
8477 target. */
8478
8479void
8480signal_catch_update (const unsigned int *info)
8481{
8482 int i;
8483
8484 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8485 signal_catch[i] = info[i] > 0;
8486 signal_cache_update (-1);
adc6a863 8487 target_pass_signals (signal_pass);
ab04a2af
TT
8488}
8489
c906108c 8490static void
96baa820 8491sig_print_header (void)
c906108c 8492{
3e43a32a
MS
8493 printf_filtered (_("Signal Stop\tPrint\tPass "
8494 "to program\tDescription\n"));
c906108c
SS
8495}
8496
8497static void
2ea28649 8498sig_print_info (enum gdb_signal oursig)
c906108c 8499{
2ea28649 8500 const char *name = gdb_signal_to_name (oursig);
c906108c 8501 int name_padding = 13 - strlen (name);
96baa820 8502
c906108c
SS
8503 if (name_padding <= 0)
8504 name_padding = 0;
8505
8506 printf_filtered ("%s", name);
488f131b 8507 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
c906108c
SS
8508 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8509 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8510 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
2ea28649 8511 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
8512}
8513
8514/* Specify how various signals in the inferior should be handled. */
8515
8516static void
0b39b52e 8517handle_command (const char *args, int from_tty)
c906108c 8518{
c906108c 8519 int digits, wordlen;
b926417a 8520 int sigfirst, siglast;
2ea28649 8521 enum gdb_signal oursig;
c906108c 8522 int allsigs;
c906108c
SS
8523
8524 if (args == NULL)
8525 {
e2e0b3e5 8526 error_no_arg (_("signal to handle"));
c906108c
SS
8527 }
8528
1777feb0 8529 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 8530
adc6a863
PA
8531 const size_t nsigs = GDB_SIGNAL_LAST;
8532 unsigned char sigs[nsigs] {};
c906108c 8533
1777feb0 8534 /* Break the command line up into args. */
c906108c 8535
773a1edc 8536 gdb_argv built_argv (args);
c906108c
SS
8537
8538 /* Walk through the args, looking for signal oursigs, signal names, and
8539 actions. Signal numbers and signal names may be interspersed with
8540 actions, with the actions being performed for all signals cumulatively
1777feb0 8541 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
c906108c 8542
773a1edc 8543 for (char *arg : built_argv)
c906108c 8544 {
773a1edc
TT
8545 wordlen = strlen (arg);
8546 for (digits = 0; isdigit (arg[digits]); digits++)
c906108c
SS
8547 {;
8548 }
8549 allsigs = 0;
8550 sigfirst = siglast = -1;
8551
773a1edc 8552 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
c906108c
SS
8553 {
8554 /* Apply action to all signals except those used by the
1777feb0 8555 debugger. Silently skip those. */
c906108c
SS
8556 allsigs = 1;
8557 sigfirst = 0;
8558 siglast = nsigs - 1;
8559 }
773a1edc 8560 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
c906108c
SS
8561 {
8562 SET_SIGS (nsigs, sigs, signal_stop);
8563 SET_SIGS (nsigs, sigs, signal_print);
8564 }
773a1edc 8565 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
c906108c
SS
8566 {
8567 UNSET_SIGS (nsigs, sigs, signal_program);
8568 }
773a1edc 8569 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
c906108c
SS
8570 {
8571 SET_SIGS (nsigs, sigs, signal_print);
8572 }
773a1edc 8573 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
c906108c
SS
8574 {
8575 SET_SIGS (nsigs, sigs, signal_program);
8576 }
773a1edc 8577 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
c906108c
SS
8578 {
8579 UNSET_SIGS (nsigs, sigs, signal_stop);
8580 }
773a1edc 8581 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
c906108c
SS
8582 {
8583 SET_SIGS (nsigs, sigs, signal_program);
8584 }
773a1edc 8585 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
c906108c
SS
8586 {
8587 UNSET_SIGS (nsigs, sigs, signal_print);
8588 UNSET_SIGS (nsigs, sigs, signal_stop);
8589 }
773a1edc 8590 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
c906108c
SS
8591 {
8592 UNSET_SIGS (nsigs, sigs, signal_program);
8593 }
8594 else if (digits > 0)
8595 {
8596 /* It is numeric. The numeric signal refers to our own
8597 internal signal numbering from target.h, not to host/target
8598 signal number. This is a feature; users really should be
8599 using symbolic names anyway, and the common ones like
8600 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8601
8602 sigfirst = siglast = (int)
773a1edc
TT
8603 gdb_signal_from_command (atoi (arg));
8604 if (arg[digits] == '-')
c906108c
SS
8605 {
8606 siglast = (int)
773a1edc 8607 gdb_signal_from_command (atoi (arg + digits + 1));
c906108c
SS
8608 }
8609 if (sigfirst > siglast)
8610 {
1777feb0 8611 /* Bet he didn't figure we'd think of this case... */
b926417a 8612 std::swap (sigfirst, siglast);
c906108c
SS
8613 }
8614 }
8615 else
8616 {
773a1edc 8617 oursig = gdb_signal_from_name (arg);
a493e3e2 8618 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
8619 {
8620 sigfirst = siglast = (int) oursig;
8621 }
8622 else
8623 {
8624 /* Not a number and not a recognized flag word => complain. */
773a1edc 8625 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
c906108c
SS
8626 }
8627 }
8628
8629 /* If any signal numbers or symbol names were found, set flags for
dda83cd7 8630 which signals to apply actions to. */
c906108c 8631
b926417a 8632 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
c906108c 8633 {
2ea28649 8634 switch ((enum gdb_signal) signum)
c906108c 8635 {
a493e3e2
PA
8636 case GDB_SIGNAL_TRAP:
8637 case GDB_SIGNAL_INT:
c906108c
SS
8638 if (!allsigs && !sigs[signum])
8639 {
9e2f0ad4 8640 if (query (_("%s is used by the debugger.\n\
3e43a32a 8641Are you sure you want to change it? "),
2ea28649 8642 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
8643 {
8644 sigs[signum] = 1;
8645 }
8646 else
c119e040 8647 printf_unfiltered (_("Not confirmed, unchanged.\n"));
c906108c
SS
8648 }
8649 break;
a493e3e2
PA
8650 case GDB_SIGNAL_0:
8651 case GDB_SIGNAL_DEFAULT:
8652 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
8653 /* Make sure that "all" doesn't print these. */
8654 break;
8655 default:
8656 sigs[signum] = 1;
8657 break;
8658 }
8659 }
c906108c
SS
8660 }
8661
b926417a 8662 for (int signum = 0; signum < nsigs; signum++)
3a031f65
PA
8663 if (sigs[signum])
8664 {
2455069d 8665 signal_cache_update (-1);
adc6a863
PA
8666 target_pass_signals (signal_pass);
8667 target_program_signals (signal_program);
c906108c 8668
3a031f65
PA
8669 if (from_tty)
8670 {
8671 /* Show the results. */
8672 sig_print_header ();
8673 for (; signum < nsigs; signum++)
8674 if (sigs[signum])
aead7601 8675 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
8676 }
8677
8678 break;
8679 }
c906108c
SS
8680}
8681
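/* Editor's note: illustrative sketch, not part of infrun.c.  It
   isolates the <LOW>-<HIGH> range syntax accepted by the numeric
   branch above (e.g. "handle 5-9 nostop noprint pass"): atoi on the
   leading digits and, when a '-' follows them, atoi on the rest,
   with a reversed range swapped just as the loop above does.  The
   function name is invented for the example.  */

#include <ctype.h>
#include <stdlib.h>

static void
model_parse_signal_range (const char *arg, int *first, int *last)
{
  int digits = 0;

  while (isdigit ((unsigned char) arg[digits]))
    digits++;

  *first = *last = atoi (arg);
  if (digits > 0 && arg[digits] == '-')
    *last = atoi (arg + digits + 1);

  if (*first > *last)
    {
      int tmp = *first;

      *first = *last;
      *last = tmp;
    }
}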
de0bea00
MF
8682/* Complete the "handle" command. */
8683
eb3ff9a5 8684static void
de0bea00 8685handle_completer (struct cmd_list_element *ignore,
eb3ff9a5 8686 completion_tracker &tracker,
6f937416 8687 const char *text, const char *word)
de0bea00 8688{
de0bea00
MF
8689 static const char * const keywords[] =
8690 {
8691 "all",
8692 "stop",
8693 "ignore",
8694 "print",
8695 "pass",
8696 "nostop",
8697 "noignore",
8698 "noprint",
8699 "nopass",
8700 NULL,
8701 };
8702
eb3ff9a5
PA
8703 signal_completer (ignore, tracker, text, word);
8704 complete_on_enum (tracker, keywords, word, word);
de0bea00
MF
8705}
8706
2ea28649
PA
8707enum gdb_signal
8708gdb_signal_from_command (int num)
ed01b82c
PA
8709{
8710 if (num >= 1 && num <= 15)
2ea28649 8711 return (enum gdb_signal) num;
ed01b82c
PA
8712 error (_("Only signals 1-15 are valid as numeric signals.\n\
8713Use \"info signals\" for a list of symbolic signals."));
8714}
8715
c906108c
SS
8716/* Print current contents of the tables set by the handle command.
8717 It is possible we should just be printing signals actually used
8718 by the current target (but for things to work right when switching
8719 targets, all signals should be in the signal tables). */
8720
8721static void
1d12d88f 8722info_signals_command (const char *signum_exp, int from_tty)
c906108c 8723{
2ea28649 8724 enum gdb_signal oursig;
abbb1732 8725
c906108c
SS
8726 sig_print_header ();
8727
8728 if (signum_exp)
8729 {
8730 /* First see if this is a symbol name. */
2ea28649 8731 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 8732 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
8733 {
8734 /* No, try numeric. */
8735 oursig =
2ea28649 8736 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
8737 }
8738 sig_print_info (oursig);
8739 return;
8740 }
8741
8742 printf_filtered ("\n");
8743 /* These ugly casts brought to you by the native VAX compiler. */
a493e3e2
PA
8744 for (oursig = GDB_SIGNAL_FIRST;
8745 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 8746 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
8747 {
8748 QUIT;
8749
a493e3e2
PA
8750 if (oursig != GDB_SIGNAL_UNKNOWN
8751 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
8752 sig_print_info (oursig);
8753 }
8754
3e43a32a
MS
8755 printf_filtered (_("\nUse the \"handle\" command "
8756 "to change these tables.\n"));
c906108c 8757}
4aa995e1
PA
8758
8759/* The $_siginfo convenience variable is a bit special. We don't know
8760 for sure the type of the value until we actually have a chance to
7a9dd1b2 8761 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
8762 also dependent on which thread you have selected.
8763
8764 1. making $_siginfo be an internalvar that creates a new value on
8765 access.
8766
8767 2. making the value of $_siginfo be an lval_computed value. */
8768
8769/* This function implements the lval_computed support for reading a
8770 $_siginfo value. */
8771
8772static void
8773siginfo_value_read (struct value *v)
8774{
8775 LONGEST transferred;
8776
a911d87a
PA
8777 /* If we can access registers, so can we access $_siginfo. Likewise
8778 vice versa. */
8779 validate_registers_access ();
c709acd1 8780
4aa995e1 8781 transferred =
8b88a78e 8782 target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO,
4aa995e1
PA
8783 NULL,
8784 value_contents_all_raw (v),
8785 value_offset (v),
8786 TYPE_LENGTH (value_type (v)));
8787
8788 if (transferred != TYPE_LENGTH (value_type (v)))
8789 error (_("Unable to read siginfo"));
8790}
8791
8792/* This function implements the lval_computed support for writing a
8793 $_siginfo value. */
8794
8795static void
8796siginfo_value_write (struct value *v, struct value *fromval)
8797{
8798 LONGEST transferred;
8799
a911d87a
PA
8800 /* If we can access registers, so can we access $_siginfo. Likewise
8801 vice versa. */
8802 validate_registers_access ();
c709acd1 8803
8b88a78e 8804 transferred = target_write (current_top_target (),
4aa995e1
PA
8805 TARGET_OBJECT_SIGNAL_INFO,
8806 NULL,
8807 value_contents_all_raw (fromval),
8808 value_offset (v),
8809 TYPE_LENGTH (value_type (fromval)));
8810
8811 if (transferred != TYPE_LENGTH (value_type (fromval)))
8812 error (_("Unable to write siginfo"));
8813}
8814
c8f2448a 8815static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
8816 {
8817 siginfo_value_read,
8818 siginfo_value_write
8819 };
8820
8821/* Return a new value with the correct type for the siginfo object of
78267919
UW
8822 the current thread using architecture GDBARCH. Return a void value
8823 if there's no object available. */
4aa995e1 8824
2c0b251b 8825static struct value *
22d2b532
SDJ
8826siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
8827 void *ignore)
4aa995e1 8828{
841de120 8829 if (target_has_stack ()
d7e15655 8830 && inferior_ptid != null_ptid
78267919 8831 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 8832 {
78267919 8833 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 8834
78267919 8835 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
4aa995e1
PA
8836 }
8837
78267919 8838 return allocate_value (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
8839}
8840
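/* Editor's note (usage example, not part of infrun.c): the computed
   value built above is what the user sees as $_siginfo, e.g. on a
   GNU/Linux target stopped by a signal:

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr

   The member names come from gdbarch_get_siginfo_type and therefore
   vary between targets; the two commands are only an illustration
   of the Linux layout.  */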
c906108c 8841\f
16c381f0
JK
8842/* infcall_suspend_state contains state about the program itself like its
8843 registers and any signal it received when it last stopped.
8844 This state must be restored regardless of how the inferior function call
8845 ends (either successfully, or after it hits a breakpoint or signal)
8846 if the program is to properly continue where it left off. */
8847
6bf78e29 8848class infcall_suspend_state
7a292a7a 8849{
6bf78e29
AB
8850public:
  /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
     once the inferior function call has finished.  */
  infcall_suspend_state (struct gdbarch *gdbarch,
                         const struct thread_info *tp,
                         struct regcache *regcache)
    : m_thread_suspend (tp->suspend),
      m_registers (new readonly_detached_regcache (*regcache))
  {
    gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;

    if (gdbarch_get_siginfo_type_p (gdbarch))
      {
        struct type *type = gdbarch_get_siginfo_type (gdbarch);
        size_t len = TYPE_LENGTH (type);

        siginfo_data.reset ((gdb_byte *) xmalloc (len));

        if (target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
                         siginfo_data.get (), 0, len) != len)
          {
            /* Errors ignored.  */
            siginfo_data.reset (nullptr);
          }
      }

    if (siginfo_data)
      {
        m_siginfo_gdbarch = gdbarch;
        m_siginfo_data = std::move (siginfo_data);
      }
  }

  /* Return a pointer to the stored register state.  */

  readonly_detached_regcache *registers () const
  {
    return m_registers.get ();
  }

  /* Restores the stored state into GDBARCH, TP, and REGCACHE.  */

  void restore (struct gdbarch *gdbarch,
                struct thread_info *tp,
                struct regcache *regcache) const
  {
    tp->suspend = m_thread_suspend;

    if (m_siginfo_gdbarch == gdbarch)
      {
        struct type *type = gdbarch_get_siginfo_type (gdbarch);

        /* Errors ignored.  */
        target_write (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
                      m_siginfo_data.get (), 0, TYPE_LENGTH (type));
      }

    /* The inferior can be gone if the user types "print exit(0)"
       (and perhaps other times).  */
    if (target_has_execution ())
      /* NB: The register write goes through to the target.  */
      regcache->restore (registers ());
  }

private:
  /* How the current thread stopped before the inferior function call was
     executed.  */
  struct thread_suspend_state m_thread_suspend;

  /* The registers before the inferior function call was executed.  */
  std::unique_ptr<readonly_detached_regcache> m_registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *m_siginfo_gdbarch = nullptr;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For different gdbarch the
     content would be invalid.  */
  gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
};

infcall_suspend_state_up
save_infcall_suspend_state ()
{
  struct thread_info *tp = inferior_thread ();
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();

  infcall_suspend_state_up inf_state
    (new struct infcall_suspend_state (gdbarch, tp, regcache));

  /* Having saved the current state, adjust the thread state, discarding
     any stop signal information.  The stop signal is not useful when
     starting an inferior function call, and run_inferior_call will not use
     the signal due to its `proceed' call with GDB_SIGNAL_0.  */
  tp->suspend.stop_signal = GDB_SIGNAL_0;

  return inf_state;
}

/* Restore inferior session state to INF_STATE.  */

void
restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  struct thread_info *tp = inferior_thread ();
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();

  inf_state->restore (gdbarch, tp, regcache);
  discard_infcall_suspend_state (inf_state);
}

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}

readonly_detached_regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers ();
}
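/* Illustrative sketch (not part of infrun.c proper): one way a caller,
   such as the infcall code, might bracket an inferior function call with
   the save/restore pair above.  The helper run_the_call () is
   hypothetical; only the save/restore/discard functions are real.  */
#if 0
static void
example_bracketed_infcall ()
{
  /* Snapshot registers, siginfo and the thread's suspend state; this
     also clears the pending stop signal for the call.  */
  infcall_suspend_state_up caller_state = save_infcall_suspend_state ();

  run_the_call ();	/* Hypothetical placeholder for the actual call.  */

  /* restore_infcall_suspend_state deletes the state object itself, so
     hand over ownership with release ().  */
  restore_infcall_suspend_state (caller_state.release ());
}
#endif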

/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  struct thread_control_state thread_control;
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  int stopped_by_random_signal = 0;

  /* ID and level of the selected frame when the inferior function
     call was made.  */
  struct frame_id selected_frame_id {};
  int selected_frame_level = -1;
};

/* Save all of the information associated with the inferior<==>gdb
   connection.  */

infcall_control_state_up
save_infcall_control_state ()
{
  infcall_control_state_up inf_status (new struct infcall_control_state);
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  inf_status->thread_control = tp->control;
  inf_status->inferior_control = inf->control;

  tp->control.step_resume_breakpoint = NULL;
  tp->control.exception_resume_breakpoint = NULL;

  /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
     chain.  If caller's caller is walking the chain, they'll be happier if we
     hand them back the original chain when restore_infcall_control_state is
     called.  */
  tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);

  /* Other fields:  */
  inf_status->stop_stack_dummy = stop_stack_dummy;
  inf_status->stopped_by_random_signal = stopped_by_random_signal;

  save_selected_frame (&inf_status->selected_frame_id,
                       &inf_status->selected_frame_level);

  return inf_status;
}

/* Restore inferior session state to INF_STATUS.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;

  if (target_has_stack ())
    {
      restore_selected_frame (inf_status->selected_frame_id,
                              inf_status->selected_frame_level);
    }

  delete inf_status;
}

void
discard_infcall_control_state (struct infcall_control_state *inf_status)
{
  if (inf_status->thread_control.step_resume_breakpoint)
    inf_status->thread_control.step_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  if (inf_status->thread_control.exception_resume_breakpoint)
    inf_status->thread_control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* See save_infcall_control_state for info on stop_bpstat.  */
  bpstat_clear (&inf_status->thread_control.stop_bpstat);

  delete inf_status;
}
\f
/* See infrun.h.  */

void
clear_exit_convenience_vars (void)
{
  clear_internalvar (lookup_internalvar ("_exitsignal"));
  clear_internalvar (lookup_internalvar ("_exitcode"));
}
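/* For reference: these are the convenience variables the user sees as
   $_exitcode and $_exitsignal.  Clearing them here means "print $_exitcode"
   reports an empty (typically "void") value until the next inferior exit
   fills it in again.  */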
\f

/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (returns error unless target implements to_set_exec_direction method).  */

enum exec_direction_kind execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
static const char *exec_direction = exec_forward;
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  NULL
};

static void
set_exec_direction_func (const char *args, int from_tty,
                         struct cmd_list_element *cmd)
{
  if (target_can_execute_reverse ())
    {
      if (!strcmp (exec_direction, exec_forward))
        execution_direction = EXEC_FORWARD;
      else if (!strcmp (exec_direction, exec_reverse))
        execution_direction = EXEC_REVERSE;
    }
  else
    {
      exec_direction = exec_forward;
      error (_("Target does not support this operation."));
    }
}

static void
show_exec_direction_func (struct ui_file *out, int from_tty,
                          struct cmd_list_element *cmd, const char *value)
{
  switch (execution_direction) {
  case EXEC_FORWARD:
    fprintf_filtered (out, _("Forward.\n"));
    break;
  case EXEC_REVERSE:
    fprintf_filtered (out, _("Reverse.\n"));
    break;
  default:
    internal_error (__FILE__, __LINE__,
                    _("bogus execution_direction value: %d"),
                    (int) execution_direction);
  }
}
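/* Illustrative use from the CLI (requires a target that can execute in
   reverse, e.g. after "record"):

     (gdb) set exec-direction reverse
     (gdb) show exec-direction
     Reverse.

   On targets without reverse execution support, the setter above resets
   the value to "forward" and reports an error.  */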

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
                        struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Resuming the execution of threads "
                            "of all processes is %s.\n"), value);
}

/* Implementation of `siginfo' variable.  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  NULL,
  NULL
};
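/* Illustrative use of the `$_siginfo' convenience variable registered via
   siginfo_funcs in _initialize_infrun below: after a signal stop,
   "print $_siginfo" (and, on targets whose gdbarch provides a siginfo
   type, member accesses such as "print $_siginfo.si_signo") reads the
   data fetched through TARGET_OBJECT_SIGNAL_INFO.  */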

/* Callback for infrun's target events source.  This is marked when a
   thread has a pending status to process.  */

static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT);
}

#if GDB_SELF_TEST
namespace selftests
{

/* Verify that when two threads with the same ptid exist (from two different
   targets) and one of them changes ptid, we only update inferior_ptid if
   it is appropriate.  */

static void
infrun_thread_ptid_changed ()
{
  gdbarch *arch = current_inferior ()->gdbarch;

  /* The thread which inferior_ptid represents changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);
    target2.mock_inferior.next = &target1.mock_inferior;

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target1.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    gdb_assert (inferior_ptid == new_ptid);
  }

  /* A thread with the same ptid as inferior_ptid, but from another target,
     changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);
    target2.mock_inferior.next = &target1.mock_inferior;

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target2.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    gdb_assert (inferior_ptid == old_ptid);
  }
}

} /* namespace selftests */

#endif /* GDB_SELF_TEST */
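/* These unit tests are registered at the bottom of _initialize_infrun
   below; in a GDB configured with self-tests they can be run with
   "maintenance selftest infrun_thread_ptid_changed".  */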

void _initialize_infrun ();
void
_initialize_infrun ()
{
  struct cmd_list_element *c;

  /* Register extra event sources in the event loop.  */
  infrun_async_inferior_event_token
    = create_async_event_handler (infrun_async_inferior_event_handler, NULL,
                                  "infrun");

  add_info ("signals", info_signals_command, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", "signals", 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  if (!dbx_commands)
    stop_command = add_cmd ("stop", class_obscure,
                            not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
Set inferior debugging."), _("\
Show inferior debugging."), _("\
When non-zero, inferior specific debugging is enabled."),
                             NULL,
                             show_debug_infrun,
                             &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("displaced", class_maintenance,
                           &debug_displaced, _("\
Set displaced stepping debugging."), _("\
Show displaced stepping debugging."), _("\
When non-zero, displaced stepping specific debugging is enabled."),
                           NULL,
                           show_debug_displaced,
                           &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
                           &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
                           set_non_stop,
                           show_non_stop,
                           &setlist,
                           &showlist);

  for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program.  Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
                            &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
                            set_stop_on_solib_events,
                            show_stop_on_solib_events,
                            &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
                        follow_fork_mode_kind_names,
                        &follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
                        NULL,
                        show_follow_fork_mode_string,
                        &setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
                        follow_exec_mode_names,
                        &follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
                        NULL,
                        show_follow_exec_mode_string,
                        &setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
                        scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off    == no locking (threads may preempt at any time)\n\
on     == full locking (no thread except the current thread may run)\n\
          This applies to both normal execution and replay mode.\n\
step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
          In this mode, other threads may run during other commands.\n\
          This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
                        set_schedlock_func,	/* traps on target vector */
                        show_scheduler_mode,
                        &setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
                           NULL,
                           show_schedule_multiple,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function.  Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
                           NULL,
                           show_step_stop_if_no_debug,
                           &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
                                &can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
                                NULL,
                                show_can_use_displaced_stepping,
                                &setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
                        &exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
                        _("Show direction of execution (forward/reverse)."),
                        _("Tells gdb whether to execute forward or backward."),
                        set_exec_direction_func, show_exec_direction_func,
                        &setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
                           NULL, NULL, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
                           &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
                           &set_disable_randomization,
                           &show_disable_randomization,
                           &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed);
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested);
  gdb::observers::thread_exit.attach (infrun_thread_thread_exit);
  gdb::observers::inferior_exit.attach (infrun_inferior_exit);
  gdb::observers::inferior_execd.attach (infrun_inferior_execd);

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);

  add_setshow_boolean_cmd ("observer", no_class,
                           &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
                           set_observer_mode,
                           show_observer_mode,
                           &setlist,
                           &showlist);

#if GDB_SELF_TEST
  selftests::register_test ("infrun_thread_ptid_changed",
                            selftests::infrun_thread_ptid_changed);
#endif
}
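/* Illustrative use of the "handle" command registered above:

     (gdb) handle SIGUSR1 nostop noprint pass

   tells GDB not to stop on or announce SIGUSR1 and to deliver it to the
   inferior, while "info signals SIGUSR1" (alias "info handle") shows the
   current disposition.  */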