/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "displaced-stepping.h"
#include "infrun.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "target.h"
#include "target-connection.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observable.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
#include "gdbsupport/event-loop.h"
#include "thread-fsm.h"
#include "gdbsupport/enum-flags.h"
#include "progspace-and-thread.h"
#include "gdbsupport/gdb_optional.h"
#include "arch-utils.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/forward-scope-exit.h"
#include "gdbsupport/gdb_select.h"
#include <unordered_map>
#include "async-event.h"
#include "gdbsupport/selftest.h"
#include "scoped-mock-context.h"
#include "test-target.h"
#include "gdbsupport/common-debug.h"

/* Prototypes for local functions */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static void follow_inferior_reset_breakpoints (void);

static bool currently_stepping (struct thread_info *tp);

static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static bool maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);

static void resume (gdb_signal sig);

static void wait_for_inferior (inferior *inf);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;

/* See infrun.h.  */

void
infrun_async (int enable)
{
  if (infrun_is_async != enable)
    {
      infrun_is_async = enable;

      infrun_debug_printf ("enable=%d", enable);

      if (enable)
        mark_async_event_handler (infrun_async_inferior_event_token);
      else
        clear_async_event_handler (infrun_async_inferior_event_token);
    }
}

/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  mark_async_event_handler (infrun_async_inferior_event_token);
}

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such functions.  */
bool step_stop_if_no_debug = false;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}

/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running
   in.  */

static ptid_t previous_inferior_ptid;

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  */

static bool detach_fork = true;

bool debug_infrun = false;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
                   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}

/* Support for disabling address space randomization.  */

bool disable_randomization = true;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  if (target_supports_disable_randomization ())
    fprintf_filtered (file,
                      _("Disabling randomization of debuggee's "
                        "virtual address space is %s.\n"),
                      value);
  else
    fputs_filtered (_("Disabling randomization of debuggee's "
                      "virtual address space is unsupported on\n"
                      "this platform.\n"), file);
}

static void
set_disable_randomization (const char *args, int from_tty,
                           struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
             "virtual address space is unsupported on\n"
             "this platform."));
}

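/* Usage note: the show/set pair above backs the user-visible
   "disable-randomization" knob, e.g.:

     (gdb) set disable-randomization off
     (gdb) show disable-randomization

   On targets that support controlling it, this decides whether the
   debuggee's virtual address space layout is randomized at startup.  */
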
/* User interface for non-stop mode.  */

bool non_stop = false;
static bool non_stop_1 = false;

static void
set_non_stop (const char *args, int from_tty,
              struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

static void
show_non_stop (struct ui_file *file, int from_tty,
               struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Controlling the inferior in non-stop mode is %s.\n"),
                    value);
}

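/* The pair above implements "set/show non-stop".  Since the setter
   refuses to change the value while the inferior is executing, the
   mode is normally selected before starting the program, e.g.:

     (gdb) set non-stop on
     (gdb) run

   In non-stop mode, stopping one thread leaves the other threads of
   the inferior running.  */
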
/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

bool observer_mode = false;
static bool observer_mode_1 = false;

static void
set_observer_mode (const char *args, int from_tty,
                   struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  bool newval = (!may_insert_breakpoints
                 && !may_insert_tracepoints
                 && may_insert_fast_tracepoints
                 && !may_stop
                 && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}

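/* Observer mode can be selected directly with "set observer on", or
   implied by flipping the individual permission flags it is derived
   from ("set may-insert-breakpoints off", "set may-stop off", ...),
   in which case update_observer_mode above recomputes it.  For
   example:

     (gdb) set observer on

   disables breakpoint/tracepoint insertion and stopping, and forces
   non-stop mode, as done in set_observer_mode above.  */
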
/* Tables of how to react to signals; the user sets them.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 0; \
  } while (0)

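/* A minimal sketch (disabled, illustration only) of how the SET_SIGS
   and UNSET_SIGS helpers above are meant to be applied.  The `sigs'
   array here is a hypothetical scratch table marking which signals a
   command referred to; the flag tables are the signal_stop-style
   tables declared above.  */
#if 0
static void
example_apply_signal_flags (int nsigs, const unsigned char *sigs)
{
  /* Mark every named signal as one that stops the program and is
     printed, and drop it from the set passed to the program.  */
  SET_SIGS (nsigs, sigs, signal_stop);
  SET_SIGS (nsigs, sigs, signal_print);
  UNSET_SIGS (nsigs, sigs, signal_program);
}
#endif
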
/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals (signal_program);
}

/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;

/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
                          int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
                    value);
}

/* True after stop if current stack frame should be printed.  */

static bool stop_print_frame;

/* This is a cached copy of the target/ptid/waitstatus of the last
   event returned by target_wait()/deprecated_target_wait_hook().
   This information is returned by get_last_target_status().  */
static process_stratum_target *target_last_proc_target;
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Debugger response to a program "
                      "call of fork or vfork is \"%s\".\n"),
                    value);
}

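/* The follow_fork_mode_string variable above corresponds to the user
   commands controlling fork handling; for example:

     (gdb) set follow-fork-mode child
     (gdb) set detach-on-fork off

   The first selects which branch of a fork GDB follows; the second
   (backed by detach_fork above) keeps GDB attached to both
   branches.  */
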
/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return 1;
    }

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
        {
          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep breakpoints list in sync.  */
              remove_breakpoints_inf (current_inferior ());
            }

          if (print_inferior_events)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = ptid_t (child_ptid.pid ());

              target_terminal::ours_for_output ();
              fprintf_filtered (gdb_stdlog,
                                _("[Detaching after %s from child %s]\n"),
                                has_vforked ? "vfork" : "fork",
                                target_pid_to_str (process_ptid).c_str ());
            }
        }
      else
        {
          struct inferior *parent_inf, *child_inf;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_ptid.pid ());

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);
          child_inf->gdbarch = parent_inf->gdbarch;
          copy_inferior_target_desc_info (child_inf, parent_inf);

          scoped_restore_current_pspace_and_thread restore_pspace_thread;

          set_current_inferior (child_inf);
          switch_to_no_thread ();
          child_inf->symfile_flags = SYMFILE_NO_READ;
          push_target (parent_inf->process_target ());
          thread_info *child_thr
            = add_thread_silent (child_inf->process_target (), child_ptid);

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              exec_on_vfork ();

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;

              /* Now that the inferiors and program spaces are all
                 wired up, we can switch to the child thread (which
                 switches inferior and program space too).  */
              switch_to_thread (child_thr);
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = new program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* solib_create_inferior_hook relies on the current
                 thread.  */
              switch_to_thread (child_thr);

              /* Let the shared library layer (e.g., solib-svr4) learn
                 about this new process, relocate the cloned exec, pull
                 in shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              solib_create_inferior_hook (0);
            }
        }

      if (has_vforked)
        {
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;
        }
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (print_inferior_events)
        {
          std::string parent_pid = target_pid_to_str (parent_ptid);
          std::string child_pid = target_pid_to_str (child_ptid);

          target_terminal::ours_for_output ();
          fprintf_filtered (gdb_stdlog,
                            _("[Attaching after %s %s to child %s]\n"),
                            parent_pid.c_str (),
                            has_vforked ? "vfork" : "fork",
                            child_pid.c_str ());
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      process_stratum_target *target = parent_inf->process_target ();

      {
        /* Hold a strong reference to the target while (maybe)
           detaching the parent.  Otherwise detaching could close the
           target.  */
        auto target_ref = target_ops_ref::new_reference (target);

        /* If we're vforking, we want to hold on to the parent until
           the child exits or execs.  At child exec or exit time we
           can remove the old breakpoints from the parent and detach
           or resume debugging it.  Otherwise, detach the parent now;
           we'll want to reuse its program/address spaces, but we
           can't set them to the child before removing breakpoints
           from the parent, otherwise, the breakpoints module could
           decide to remove breakpoints from the wrong process (since
           they'd be assigned to the same address space).  */

        if (has_vforked)
          {
            gdb_assert (child_inf->vfork_parent == NULL);
            gdb_assert (parent_inf->vfork_child == NULL);
            child_inf->vfork_parent = parent_inf;
            child_inf->pending_detach = 0;
            parent_inf->vfork_child = child_inf;
            parent_inf->pending_detach = detach_fork;
            parent_inf->waiting_for_vfork_done = 0;
          }
        else if (detach_fork)
          {
            if (print_inferior_events)
              {
                /* Ensure that we have a process ptid.  */
                ptid_t process_ptid = ptid_t (parent_ptid.pid ());

                target_terminal::ours_for_output ();
                fprintf_filtered (gdb_stdlog,
                                  _("[Detaching after fork from "
                                    "parent %s]\n"),
                                  target_pid_to_str (process_ptid).c_str ());
              }

            target_detach (parent_inf, 0);
            parent_inf = NULL;
          }

        /* Note that the detach above makes PARENT_INF dangling.  */

        /* Add the child thread to the appropriate lists, and switch
           to this new thread, before cloning the program space, and
           informing the solib layer about this new process.  */

        set_current_inferior (child_inf);
        push_target (target);
      }

      thread_info *child_thr = add_thread_silent (target, child_ptid);

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;

          exec_on_vfork ();
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = new program_space (child_inf->aspace);
          child_inf->removable = 1;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (e.g., solib-svr4) learn
             about this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          solib_create_inferior_hook (0);
        }

      switch_to_thread (child_thr);
    }

  return target_follow_fork (follow_child, detach_fork);
}

/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static bool
follow_fork ()
{
  bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  bool should_resume = true;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  int current_line = 0;
  symtab *current_symtab = NULL;
  struct frame_id step_frame_id = { 0 };
  struct thread_fsm *thread_fsm = NULL;

  if (!non_stop)
    {
      process_stratum_target *wait_target;
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_target, &wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
         do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
          && wait_status.kind != TARGET_WAITKIND_VFORKED)
        return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
         reported.  */
      if (wait_ptid != minus_one_ptid
          && (current_inferior ()->process_target () != wait_target
              || inferior_ptid != wait_ptid))
        {
          /* We did.  Switch back to WAIT_PTID thread, to tell the
             target to follow it (in either direction).  We'll
             afterwards refuse to resume, and inform the user what
             happened.  */
          thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
          switch_to_thread (wait_thread);
          should_resume = false;
        }
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
        ptid_t parent, child;

        /* If the user did a next/step, etc, over a fork call,
           preserve the stepping state in the fork child.  */
        if (follow_child && should_resume)
          {
            step_resume_breakpoint = clone_momentary_breakpoint
              (tp->control.step_resume_breakpoint);
            step_range_start = tp->control.step_range_start;
            step_range_end = tp->control.step_range_end;
            current_line = tp->current_line;
            current_symtab = tp->current_symtab;
            step_frame_id = tp->control.step_frame_id;
            exception_resume_breakpoint
              = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
            thread_fsm = tp->thread_fsm;

            /* For now, delete the parent's sr breakpoint, otherwise,
               parent/child sr breakpoints are considered duplicates,
               and the child version will not be installed.  Remove
               this when the breakpoints module becomes aware of
               inferiors and address spaces.  */
            delete_step_resume_breakpoint (tp);
            tp->control.step_range_start = 0;
            tp->control.step_range_end = 0;
            tp->control.step_frame_id = null_frame_id;
            delete_exception_resume_breakpoint (tp);
            tp->thread_fsm = NULL;
          }

        parent = inferior_ptid;
        child = tp->pending_follow.value.related_pid;

        process_stratum_target *parent_targ = tp->inf->process_target ();
        /* Set up inferior(s) as specified by the caller, and tell the
           target to do whatever is necessary to follow either parent
           or child.  */
        if (follow_fork_inferior (follow_child, detach_fork))
          {
            /* Target refused to follow, or there's some other reason
               we shouldn't resume.  */
            should_resume = 0;
          }
        else
          {
            /* This pending follow fork event is now handled, one way
               or another.  The previous selected thread may be gone
               from the lists by now, but if it is still around, need
               to clear the pending follow request.  */
            tp = find_thread_ptid (parent_targ, parent);
            if (tp)
              tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

            /* This makes sure we don't try to apply the "Switched
               over from WAIT_PID" logic above.  */
            nullify_last_target_wait_ptid ();

            /* If we followed the child, switch to it...  */
            if (follow_child)
              {
                thread_info *child_thr = find_thread_ptid (parent_targ, child);
                switch_to_thread (child_thr);

                /* ... and preserve the stepping state, in case the
                   user was stepping over the fork call.  */
                if (should_resume)
                  {
                    tp = inferior_thread ();
                    tp->control.step_resume_breakpoint
                      = step_resume_breakpoint;
                    tp->control.step_range_start = step_range_start;
                    tp->control.step_range_end = step_range_end;
                    tp->current_line = current_line;
                    tp->current_symtab = current_symtab;
                    tp->control.step_frame_id = step_frame_id;
                    tp->control.exception_resume_breakpoint
                      = exception_resume_breakpoint;
                    tp->thread_fsm = thread_fsm;
                  }
                else
                  {
                    /* If we get here, it was because we're trying to
                       resume from a fork catchpoint, but, the user
                       has switched threads away from the thread that
                       forked.  In that case, the resume command
                       issued is most likely not applicable to the
                       child, so just warn, and refuse to resume.  */
                    warning (_("Not resuming: switched threads "
                               "before following fork child."));
                  }

                /* Reset breakpoints in the child as appropriate.  */
                follow_inferior_reset_breakpoints ();
              }
          }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
                      "Unexpected pending_follow.kind %d\n",
                      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}

static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->loc->enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->loc->enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}

/* The child has exited or execed: resume threads of the parent the
   user wanted to be executing.  */

static int
proceed_after_vfork_done (struct thread_info *thread,
                          void *arg)
{
  int pid = * (int *) arg;

  if (thread->ptid.pid () == pid
      && thread->state == THREAD_RUNNING
      && !thread->executing
      && !thread->stop_requested
      && thread->suspend.stop_signal == GDB_SIGNAL_0)
    {
      infrun_debug_printf ("resuming vfork parent thread %s",
                           target_pid_to_str (thread->ptid).c_str ());

      switch_to_thread (thread);
      clear_proceed_status (0);
      proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
    }

  return 0;
}

/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
         between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = NULL;
      inf->vfork_parent = NULL;

      /* If the user wanted to detach from the parent, now is the
         time.  */
      if (vfork_parent->pending_detach)
        {
          struct program_space *pspace;
          struct address_space *aspace;

          /* follow-fork child, detach-on-fork on.  */

          vfork_parent->pending_detach = 0;

          scoped_restore_current_pspace_and_thread restore_thread;

          /* We're letting loose of the parent.  */
          thread_info *tp = any_live_thread_of_inferior (vfork_parent);
          switch_to_thread (tp);

          /* We're about to detach from the parent, which implicitly
             removes breakpoints from its address space.  There's a
             catch here: we want to reuse the spaces for the child,
             but, parent/child are still sharing the pspace at this
             point, although the exec in reality makes the kernel give
             the child a fresh set of new pages.  The problem here is
             that the breakpoints module, being unaware of this, would
             likely choose the child process to write to the parent
             address space.  Swapping the child temporarily away from
             the spaces has the desired effect.  Yes, this is "sort
             of" a hack.  */

          pspace = inf->pspace;
          aspace = inf->aspace;
          inf->aspace = NULL;
          inf->pspace = NULL;

          if (print_inferior_events)
            {
              std::string pidstr
                = target_pid_to_str (ptid_t (vfork_parent->pid));

              target_terminal::ours_for_output ();

              if (exec)
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exec]\n"), pidstr.c_str ());
                }
              else
                {
                  fprintf_filtered (gdb_stdlog,
                                    _("[Detaching vfork parent %s "
                                      "after child exit]\n"), pidstr.c_str ());
                }
            }

          target_detach (vfork_parent, 0);

          /* Put it back.  */
          inf->pspace = pspace;
          inf->aspace = aspace;
        }
      else if (exec)
        {
          /* We're staying attached to the parent, so, really give the
             child a new address space.  */
          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          inf->removable = 1;
          set_current_program_space (inf->pspace);

          resume_parent = vfork_parent->pid;
        }
      else
        {
          /* If this is a vfork child exiting, then the pspace and
             aspaces were shared with the parent.  Since we're
             reporting the process exit, we'll be mourning all that is
             found in the address space, and switching to null_ptid,
             preparing to start a new inferior.  But, since we don't
             want to clobber the parent's address/program spaces, we
             go ahead and create a new one for this exiting
             inferior.  */

          /* Switch to no-thread while running clone_program_space, so
             that clone_program_space doesn't want to read the
             selected frame of a dead process.  */
          scoped_restore_current_thread restore_thread;
          switch_to_no_thread ();

          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          set_current_program_space (inf->pspace);
          inf->removable = 1;
          inf->symfile_flags = SYMFILE_NO_READ;
          clone_program_space (inf->pspace, vfork_parent->pspace);

          resume_parent = vfork_parent->pid;
        }

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
        {
          /* If the user wanted the parent to be running, let it go
             free now.  */
          scoped_restore_current_thread restore_thread;

          infrun_debug_printf ("resuming vfork parent process %d",
                               resume_parent);

          iterate_over_threads (proceed_after_vfork_done, &resume_parent);
        }
    }
}

/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

static const char *follow_exec_mode_string = follow_exec_mode_same;
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}

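/* These strings back the "set follow-exec-mode" command; for example:

     (gdb) set follow-exec-mode new

   makes follow_exec below create a fresh inferior for the post-exec
   program, instead of reusing the current inferior ("same", the
   default).  */
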
/* EXEC_FILE_TARGET is assumed to be non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  struct inferior *inf = current_inferior ();
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  printf_unfiltered (_("%s is executing new program: %s\n"),
                     target_pid_to_str (process_ptid).c_str (),
                     exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, NULL);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == NULL)
    warning (_("Could not load symbols for executable %s.\n"
               "Do you need \"set sysroot\"?"),
             exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
         around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
         inferior's pid.  Having two inferiors with the same pid would confuse
         find_inferior_p(t)id.  Transfer the terminal state and info from the
         old to the new inferior.  */
      inf = add_inferior_with_spaces ();
      swap_terminal_info (inf, current_inferior ());
      exit_inferior_silent (current_inferior ());

      inf->pid = pid;
      target_follow_exec (inf, exec_file_target);

      inferior *org_inferior = current_inferior ();
      switch_to_inferior_no_thread (inf);
      push_target (org_inferior->process_target ());
      thread_info *thr = add_thread (inf->process_target (), ptid);
      switch_to_thread (thr);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
         E.g., a 64-bit process exec'ed a 32-bit process.  Clear the
         old description; we'll read a new one below.  No need to do
         this on "follow-exec-mode new", as the old inferior stays
         around (its description is later cleared/refetched on
         restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  gdb::observers::inferior_execd.notify (inf);

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}

/* The chain of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
struct thread_info *global_thread_step_over_chain_head;

/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);

/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace;
  CORE_ADDR address;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p;

  /* The thread's global number.  */
  int thread;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;

/* Record the address of the breakpoint/instruction we're currently
   stepping over.
   N.B. We record the aspace and address now, instead of say just the thread,
   because when we need the info later the thread may be running.  */

static void
set_step_over_info (const address_space *aspace, CORE_ADDR address,
                    int nonsteppable_watchpoint_p,
                    int thread)
{
  step_over_info.aspace = aspace;
  step_over_info.address = address;
  step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
  step_over_info.thread = thread;
}

/* Called when we're no longer stepping over a breakpoint / an
   instruction, so all breakpoints are free to be (re)inserted.  */

static void
clear_step_over_info (void)
{
  infrun_debug_printf ("clearing step over info");
  step_over_info.aspace = NULL;
  step_over_info.address = 0;
  step_over_info.nonsteppable_watchpoint_p = 0;
  step_over_info.thread = -1;
}

/* See infrun.h.  */

int
stepping_past_instruction_at (struct address_space *aspace,
                              CORE_ADDR address)
{
  return (step_over_info.aspace != NULL
          && breakpoint_address_match (aspace, address,
                                       step_over_info.aspace,
                                       step_over_info.address));
}

/* See infrun.h.  */

int
thread_is_stepping_over_breakpoint (int thread)
{
  return (step_over_info.thread != -1
          && thread == step_over_info.thread);
}

/* See infrun.h.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  return step_over_info.nonsteppable_watchpoint_p;
}

/* Returns true if step-over info is valid.  */

static bool
step_over_info_valid_p (void)
{
  return (step_over_info.aspace != NULL
          || stepping_past_nonsteppable_watchpoint ());
}

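/* A rough sketch (disabled; the surrounding plumbing is only
   illustrative) of how the helpers above are intended to bracket a
   step-over: record the location before single-stepping past the
   breakpoint, and clear it once the step-over completes, so that any
   breakpoint (re)insertion in between skips that one location.  */
#if 0
static void
example_step_over (struct thread_info *tp, const address_space *aspace,
                   CORE_ADDR pc)
{
  set_step_over_info (aspace, pc, 0, tp->global_num);
  /* ... single-step TP past the breakpoint at PC ...  */
  clear_step_over_info ();
}
#endif
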
/* Displaced stepping.  */

/* In non-stop debugging mode, we must take special care to manage
   breakpoints properly; in particular, the traditional strategy for
   stepping a thread past a breakpoint it has hit is unsuitable.
   'Displaced stepping' is a tactic for stepping one thread past a
   breakpoint it has hit while ensuring that other threads running
   concurrently will hit the breakpoint as they should.

   The traditional way to step a thread T off a breakpoint in a
   multi-threaded program in all-stop mode is as follows:

   a0) Initially, all threads are stopped, and breakpoints are not
       inserted.
   a1) We single-step T, leaving breakpoints uninserted.
   a2) We insert breakpoints, and resume all threads.

   In non-stop debugging, however, this strategy is unsuitable: we
   don't want to have to stop all threads in the system in order to
   continue or step T past a breakpoint.  Instead, we use displaced
   stepping:

   n0) Initially, T is stopped, other threads are running, and
       breakpoints are inserted.
   n1) We copy the instruction "under" the breakpoint to a separate
       location, outside the main code stream, making any adjustments
       to the instruction, register, and memory state as directed by
       T's architecture.
   n2) We single-step T over the instruction at its new location.
   n3) We adjust the resulting register and memory state as directed
       by T's architecture.  This includes resetting T's PC to point
       back into the main instruction stream.
   n4) We resume T.

   This approach depends on the following gdbarch methods:

   - gdbarch_max_insn_length and gdbarch_displaced_step_location
     indicate where to copy the instruction, and how much space must
     be reserved there.  We use these in step n1.

   - gdbarch_displaced_step_copy_insn copies an instruction to a new
     address, and makes any necessary adjustments to the instruction,
     register contents, and memory.  We use this in step n1.

   - gdbarch_displaced_step_fixup adjusts registers and memory after
     we have successfully single-stepped the instruction, to yield the
     same effect the instruction would have had if we had executed it
     at its original address.  We use this in step n3.

   The gdbarch_displaced_step_copy_insn and
   gdbarch_displaced_step_fixup functions must be written so that
   copying an instruction with gdbarch_displaced_step_copy_insn,
   single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
   thread's memory and registers as stepping the instruction in place
   would have.  Exactly which responsibilities fall to the copy and
   which fall to the fixup is up to the author of those functions.

   See the comments in gdbarch.sh for details.

   Note that displaced stepping and software single-step cannot
   currently be used in combination, although with some care I think
   they could be made to.  Software single-step works by placing
   breakpoints on all possible subsequent instructions; if the
   displaced instruction is a PC-relative jump, those breakpoints
   could fall in very strange places --- on pages that aren't
   executable, or at addresses that are not proper instruction
   boundaries.  (We do generally let other threads run while we wait
   to hit the software single-step breakpoint, and they might
   encounter such a corrupted instruction.)  One way to work around
   this would be to have gdbarch_displaced_step_copy_insn fully
   simulate the effect of PC-relative instructions (and return NULL)
   on architectures that use software single-stepping.

   In non-stop mode, we can have independent and simultaneous step
   requests, so more than one thread may need to simultaneously step
   over a breakpoint.  The current implementation assumes there is
   only one scratch space per process.  In this case, we have to
   serialize access to the scratch space.  If thread A wants to step
   over a breakpoint, but we are currently waiting for some other
   thread to complete a displaced step, we leave thread A stopped and
   place it in the displaced_step_request_queue.  Whenever a displaced
   step finishes, we pick the next thread in the queue and start a new
   displaced step operation on it.  See displaced_step_prepare and
   displaced_step_finish for details.  */

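/* For orientation, the n1-n4 sequence above can be read as the
   following pseudo-code.  This is a conceptual sketch only -- the
   helper names are hypothetical stand-ins, not gdb's actual API (see
   displaced_step_prepare / displaced_step_finish for the real entry
   points).  */
#if 0
static void
displaced_step_sketch (struct thread_info *t, CORE_ADDR pc, CORE_ADDR scratch)
{
  copy_and_adjust_insn (t, pc, scratch);   /* n1: copy insn out of line.  */
  set_thread_pc (t, scratch);
  single_step_thread (t);                  /* n2: step the copied insn.  */
  fixup_regs_and_memory (t, pc, scratch);  /* n3: as if the insn had run at
                                              PC; point PC back into the
                                              main instruction stream.  */
  resume_thread (t);                       /* n4: let T run again.  */
}
#endif
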
a46d1843 1455/* Return true if THREAD is doing a displaced step. */
c0987663 1456
c4464ade 1457static bool
00431a78 1458displaced_step_in_progress_thread (thread_info *thread)
c0987663 1459{
00431a78 1460 gdb_assert (thread != NULL);
c0987663 1461
187b041e 1462 return thread->displaced_step_state.in_progress ();
c0987663
YQ
1463}
1464
a46d1843 1465/* Return true if INF has a thread doing a displaced step. */
8f572e5c 1466
c4464ade 1467static bool
00431a78 1468displaced_step_in_progress (inferior *inf)
8f572e5c 1469{
187b041e 1470 return inf->displaced_step_state.in_progress_count > 0;
fc1cf338
PA
1471}
1472
187b041e 1473/* Return true if any thread is doing a displaced step. */
a42244db 1474
187b041e
SM
1475static bool
1476displaced_step_in_progress_any_thread ()
a42244db 1477{
187b041e
SM
1478 for (inferior *inf : all_non_exited_inferiors ())
1479 {
1480 if (displaced_step_in_progress (inf))
1481 return true;
1482 }
a42244db 1483
187b041e 1484 return false;
a42244db
YQ
1485}
1486
fc1cf338
PA
1487static void
1488infrun_inferior_exit (struct inferior *inf)
1489{
d20172fc 1490 inf->displaced_step_state.reset ();
fc1cf338 1491}
237fc4c9 1492
3b7a962d
SM
1493static void
1494infrun_inferior_execd (inferior *inf)
1495{
187b041e
SM
1496 /* If some threads were doing a displaced step in this inferior at the
1497 moment of the exec, they no longer exist. Even if the exec'ing thread
3b7a962d
SM
1498 was doing a displaced step, we don't want to do any fixup nor restore
1499 displaced stepping buffer bytes. */
1500 inf->displaced_step_state.reset ();
1501
187b041e
SM
1502 for (thread_info *thread : inf->threads ())
1503 thread->displaced_step_state.reset ();
1504
3b7a962d
SM
1505 /* Since an in-line step is done with everything else stopped, if there was
1506 one in progress at the time of the exec, it must have been the exec'ing
1507 thread. */
1508 clear_step_over_info ();
1509}
1510
fff08868
HZ
1511/* If ON, and the architecture supports it, GDB will use displaced
1512 stepping to step over breakpoints. If OFF, or if the architecture
1513 doesn't support it, GDB will instead use the traditional
1514 hold-and-step approach. If AUTO (which is the default), GDB will
1515 decide which technique to use to step over breakpoints depending on
9822cb57 1516 whether the target works in a non-stop way (see use_displaced_stepping). */
fff08868 1517
72d0e2c5 1518static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
fff08868 1519
237fc4c9
PA
1520static void
1521show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1522 struct cmd_list_element *c,
1523 const char *value)
1524{
72d0e2c5 1525 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
3e43a32a
MS
1526 fprintf_filtered (file,
1527 _("Debugger's willingness to use displaced stepping "
1528 "to step over breakpoints is %s (currently %s).\n"),
fbea99ea 1529 value, target_is_non_stop_p () ? "on" : "off");
fff08868 1530 else
3e43a32a
MS
1531 fprintf_filtered (file,
1532 _("Debugger's willingness to use displaced stepping "
1533 "to step over breakpoints is %s.\n"), value);
237fc4c9
PA
1534}
1535
9822cb57
SM
1536/* Return true if the gdbarch implements the required methods to use
1537 displaced stepping. */
1538
1539static bool
1540gdbarch_supports_displaced_stepping (gdbarch *arch)
1541{
187b041e
SM
1542 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1543 that if `prepare` is provided, so is `finish`. */
1544 return gdbarch_displaced_step_prepare_p (arch);
9822cb57
SM
1545}
1546
fff08868 1547 /* Return true if displaced stepping can/should be used to step
3fc8eb30 1548 over breakpoints of thread TP. */
fff08868 1549
9822cb57
SM
1550static bool
1551use_displaced_stepping (thread_info *tp)
237fc4c9 1552{
9822cb57
SM
1553 /* If the user disabled it explicitly, don't use displaced stepping. */
1554 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1555 return false;
1556
1557 /* If "auto", only use displaced stepping if the target operates in a non-stop
1558 way. */
1559 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1560 && !target_is_non_stop_p ())
1561 return false;
1562
1563 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1564
1565 /* If the architecture doesn't implement displaced stepping, don't use
1566 it. */
1567 if (!gdbarch_supports_displaced_stepping (gdbarch))
1568 return false;
1569
1570 /* If recording, don't use displaced stepping. */
1571 if (find_record_target () != nullptr)
1572 return false;
1573
9822cb57
SM
1574 /* If displaced stepping failed before for this inferior, don't bother trying
1575 again. */
f5f01699 1576 if (tp->inf->displaced_step_state.failed_before)
9822cb57
SM
1577 return false;
1578
1579 return true;
237fc4c9
PA
1580}
1581
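/* Illustration (hypothetical settings, not taken from a real session):
   with "set displaced-stepping auto" -- the default -- and a target
   that does not operate in a non-stop way, the AUTO check above makes
   this return false and GDB steps over breakpoints in-line; with "set
   displaced-stepping on" the scratch-pad method is used instead,
   provided the architecture implements the displaced-stepping gdbarch
   hooks, no record target is pushed, and displaced stepping has not
   already failed for this inferior.  */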
187b041e 1582/* Simple function wrapper around displaced_step_thread_state::reset. */
d8d83535 1583
237fc4c9 1584static void
187b041e 1585displaced_step_reset (displaced_step_thread_state *displaced)
237fc4c9 1586{
d8d83535 1587 displaced->reset ();
237fc4c9
PA
1588}
1589
d8d83535
SM
1590/* A cleanup that wraps displaced_step_reset. We use this instead of, say,
1591 SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()". */
1592
1593using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
237fc4c9 1594
136821d9
SM
1595/* See infrun.h. */
1596
1597std::string
1598displaced_step_dump_bytes (const gdb_byte *buf, size_t len)
237fc4c9 1599{
136821d9 1600 std::string ret;
237fc4c9 1601
136821d9
SM
1602 for (size_t i = 0; i < len; i++)
1603 {
1604 if (i == 0)
1605 ret += string_printf ("%02x", buf[i]);
1606 else
1607 ret += string_printf (" %02x", buf[i]);
1608 }
1609
1610 return ret;
237fc4c9
PA
1611}
1612
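/* For example, with a hypothetical four-byte buffer:

     const gdb_byte buf[] = { 0x55, 0x48, 0x89, 0xe5 };
     std::string dump = displaced_step_dump_bytes (buf, sizeof (buf));

   DUMP is then "55 48 89 e5"; the displaced_debug_printf calls further
   down embed such strings in "set debug displaced" output.  */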
1613/* Prepare to single-step, using displaced stepping.
1614
1615 Note that we cannot use displaced stepping when we have a signal to
1616 deliver. If we have a signal to deliver and an instruction to step
1617 over, then after the step, there will be no indication from the
1618 target whether the thread entered a signal handler or ignored the
1619 signal and stepped over the instruction successfully --- both cases
1620 result in a simple SIGTRAP. In the first case we mustn't do a
1621 fixup, and in the second case we must --- but we can't tell which.
1622 Comments in the code for 'random signals' in handle_inferior_event
1623 explain how we handle this case instead.
1624
bab37966
SM
1625 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1626 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1627 if displaced stepping this thread got queued; or
1628 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1629 stepped. */
7f03bd92 1630
bab37966 1631static displaced_step_prepare_status
00431a78 1632displaced_step_prepare_throw (thread_info *tp)
237fc4c9 1633{
00431a78 1634 regcache *regcache = get_thread_regcache (tp);
ac7936df 1635 struct gdbarch *gdbarch = regcache->arch ();
187b041e
SM
1636 displaced_step_thread_state &disp_step_thread_state
1637 = tp->displaced_step_state;
237fc4c9
PA
1638
1639 /* We should never reach this function if the architecture does not
1640 support displaced stepping. */
9822cb57 1641 gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));
237fc4c9 1642
c2829269
PA
1643 /* Nor if the thread isn't meant to step over a breakpoint. */
1644 gdb_assert (tp->control.trap_expected);
1645
c1e36e3e
PA
1646 /* Disable range stepping while executing in the scratch pad. We
1647 want a single-step even if executing the displaced instruction in
1648 the scratch buffer lands within the stepping range (e.g., a
1649 jump/branch). */
1650 tp->control.may_range_step = 0;
1651
187b041e
SM
1652 /* We are about to start a displaced step for this thread. If one is already
1653 in progress, something's wrong. */
1654 gdb_assert (!disp_step_thread_state.in_progress ());
237fc4c9 1655
187b041e 1656 if (tp->inf->displaced_step_state.unavailable)
237fc4c9 1657 {
187b041e
SM
1658 /* The gdbarch has told us a prepare request is not worth making: it
1659 would most likely come back unavailable, so don't bother asking. */
237fc4c9 1660
136821d9
SM
1661 displaced_debug_printf ("deferring step of %s",
1662 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1663
28d5518b 1664 global_thread_step_over_chain_enqueue (tp);
bab37966 1665 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
237fc4c9 1666 }
237fc4c9 1667
187b041e
SM
1668 displaced_debug_printf ("displaced-stepping %s now",
1669 target_pid_to_str (tp->ptid).c_str ());
237fc4c9 1670
00431a78
PA
1671 scoped_restore_current_thread restore_thread;
1672
1673 switch_to_thread (tp);
ad53cd71 1674
187b041e
SM
1675 CORE_ADDR original_pc = regcache_read_pc (regcache);
1676 CORE_ADDR displaced_pc;
237fc4c9 1677
187b041e
SM
1678 displaced_step_prepare_status status
1679 = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);
237fc4c9 1680
187b041e 1681 if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
d35ae833 1682 {
187b041e
SM
1683 displaced_debug_printf ("failed to prepare (%s)",
1684 target_pid_to_str (tp->ptid).c_str ());
d35ae833 1685
bab37966 1686 return DISPLACED_STEP_PREPARE_STATUS_CANT;
d35ae833 1687 }
187b041e 1688 else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
7f03bd92 1689 {
187b041e
SM
1690 /* Not enough displaced stepping resources available, defer this
1691 request by placing it in the queue. */
1692
1693 displaced_debug_printf ("not enough resources available, "
1694 "deferring step of %s",
1695 target_pid_to_str (tp->ptid).c_str ());
1696
1697 global_thread_step_over_chain_enqueue (tp);
1698
1699 return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
7f03bd92 1700 }
237fc4c9 1701
187b041e
SM
1702 gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);
1703
9f5a595d
UW
1704 /* Save the information we need to fix things up if the step
1705 succeeds. */
187b041e 1706 disp_step_thread_state.set (gdbarch);
9f5a595d 1707
187b041e 1708 tp->inf->displaced_step_state.in_progress_count++;
ad53cd71 1709
187b041e
SM
1710 displaced_debug_printf ("prepared successfully thread=%s, "
1711 "original_pc=%s, displaced_pc=%s",
1712 target_pid_to_str (tp->ptid).c_str (),
1713 paddress (gdbarch, original_pc),
1714 paddress (gdbarch, displaced_pc));
237fc4c9 1715
bab37966 1716 return DISPLACED_STEP_PREPARE_STATUS_OK;
237fc4c9
PA
1717}
1718
3fc8eb30
PA
1719 /* Wrapper for displaced_step_prepare_throw that disables further
1720 attempts at displaced stepping if we get a memory error. */
1721
bab37966 1722static displaced_step_prepare_status
00431a78 1723displaced_step_prepare (thread_info *thread)
3fc8eb30 1724{
bab37966
SM
1725 displaced_step_prepare_status status
1726 = DISPLACED_STEP_PREPARE_STATUS_CANT;
3fc8eb30 1727
a70b8144 1728 try
3fc8eb30 1729 {
bab37966 1730 status = displaced_step_prepare_throw (thread);
3fc8eb30 1731 }
230d2906 1732 catch (const gdb_exception_error &ex)
3fc8eb30 1733 {
16b41842
PA
1734 if (ex.error != MEMORY_ERROR
1735 && ex.error != NOT_SUPPORTED_ERROR)
eedc3f4f 1736 throw;
3fc8eb30 1737
1eb8556f
SM
1738 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1739 ex.what ());
3fc8eb30
PA
1740
1741 /* Be verbose if "set displaced-stepping" is "on", silent if
1742 "auto". */
1743 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1744 {
fd7dcb94 1745 warning (_("disabling displaced stepping: %s"),
3d6e9d23 1746 ex.what ());
3fc8eb30
PA
1747 }
1748
1749 /* Disable further displaced stepping attempts. */
f5f01699 1750 thread->inf->displaced_step_state.failed_before = 1;
3fc8eb30 1751 }
3fc8eb30 1752
bab37966 1753 return status;
3fc8eb30
PA
1754}
1755
bab37966
SM
1756/* If we displaced stepped an instruction successfully, adjust registers and
1757 memory to yield the same effect the instruction would have had if we had
1758 executed it at its original address, and return
1759 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
1760 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
372316f1 1761
bab37966
SM
1762 If the thread wasn't displaced stepping, return
1763 DISPLACED_STEP_FINISH_STATUS_OK as well. */
1764
1765static displaced_step_finish_status
7def77a1 1766displaced_step_finish (thread_info *event_thread, enum gdb_signal signal)
237fc4c9 1767{
187b041e 1768 displaced_step_thread_state *displaced = &event_thread->displaced_step_state;
fc1cf338 1769
187b041e
SM
1770 /* Was this thread performing a displaced step? */
1771 if (!displaced->in_progress ())
bab37966 1772 return DISPLACED_STEP_FINISH_STATUS_OK;
237fc4c9 1773
187b041e
SM
1774 gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
1775 event_thread->inf->displaced_step_state.in_progress_count--;
1776
cb71640d
PA
1777 /* Fixup may need to read memory/registers. Switch to the thread
1778 that we're fixing up. Also, target_stopped_by_watchpoint checks
d43b7a2d
TBA
1779 the current thread, and displaced_step_restore performs ptid-dependent
1780 memory accesses using current_inferior() and current_top_target(). */
00431a78 1781 switch_to_thread (event_thread);
cb71640d 1782
d43b7a2d
TBA
1783 displaced_step_reset_cleanup cleanup (displaced);
1784
187b041e
SM
1785 /* Do the fixup, and release the resources acquired to do the displaced
1786 step. */
1787 return gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
1788 event_thread, signal);
c2829269 1789}
1c5cfe86 1790
4d9d9d04
PA
1791/* Data to be passed around while handling an event. This data is
1792 discarded between events. */
1793struct execution_control_state
1794{
5b6d1e4f 1795 process_stratum_target *target;
4d9d9d04
PA
1796 ptid_t ptid;
1797 /* The thread that got the event, if this was a thread event; NULL
1798 otherwise. */
1799 struct thread_info *event_thread;
1800
1801 struct target_waitstatus ws;
1802 int stop_func_filled_in;
1803 CORE_ADDR stop_func_start;
1804 CORE_ADDR stop_func_end;
1805 const char *stop_func_name;
1806 int wait_some_more;
1807
1808 /* True if the event thread hit the single-step breakpoint of
1809 another thread. Thus the event doesn't cause a stop, the thread
1810 needs to be single-stepped past the single-step breakpoint before
1811 we can switch back to the original stepping thread. */
1812 int hit_singlestep_breakpoint;
1813};
1814
1815/* Clear ECS and set it to point at TP. */
c2829269
PA
1816
1817static void
4d9d9d04
PA
1818reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
1819{
1820 memset (ecs, 0, sizeof (*ecs));
1821 ecs->event_thread = tp;
1822 ecs->ptid = tp->ptid;
1823}
1824
1825static void keep_going_pass_signal (struct execution_control_state *ecs);
1826static void prepare_to_wait (struct execution_control_state *ecs);
c4464ade 1827static bool keep_going_stepped_thread (struct thread_info *tp);
8d297bbf 1828static step_over_what thread_still_needs_step_over (struct thread_info *tp);
4d9d9d04
PA
1829
1830/* Are there any pending step-over requests? If so, run all we can
1831 now and return true. Otherwise, return false. */
1832
c4464ade 1833static bool
c2829269
PA
1834start_step_over (void)
1835{
187b041e 1836 thread_info *next;
c2829269 1837
372316f1
PA
1838 /* Don't start a new step-over if we already have an in-line
1839 step-over operation ongoing. */
1840 if (step_over_info_valid_p ())
c4464ade 1841 return false;
372316f1 1842
187b041e
SM
1843 /* Steal the global thread step over chain. As we try to initiate displaced
1844 steps, threads will be enqueued in the global chain if no buffers are
1845 available. If we iterated on the global chain directly, we might iterate
1846 indefinitely. */
1847 thread_info *threads_to_step = global_thread_step_over_chain_head;
1848 global_thread_step_over_chain_head = NULL;
1849
1850 infrun_debug_printf ("stealing global queue of threads to step, length = %d",
1851 thread_step_over_chain_length (threads_to_step));
1852
1853 bool started = false;
1854
1855 /* On scope exit (whatever the reason, return or exception), if there are
1856 threads left in the THREADS_TO_STEP chain, put back these threads in the
1857 global list. */
1858 SCOPE_EXIT
1859 {
1860 if (threads_to_step == nullptr)
1861 infrun_debug_printf ("step-over queue now empty");
1862 else
1863 {
1864 infrun_debug_printf ("putting back %d threads to step in global queue",
1865 thread_step_over_chain_length (threads_to_step));
1866
1867 global_thread_step_over_chain_enqueue_chain (threads_to_step);
1868 }
1869 };
1870
1871 for (thread_info *tp = threads_to_step; tp != NULL; tp = next)
237fc4c9 1872 {
4d9d9d04
PA
1873 struct execution_control_state ecss;
1874 struct execution_control_state *ecs = &ecss;
8d297bbf 1875 step_over_what step_what;
372316f1 1876 int must_be_in_line;
c2829269 1877
c65d6b55
PA
1878 gdb_assert (!tp->stop_requested);
1879
187b041e 1880 next = thread_step_over_chain_next (threads_to_step, tp);
237fc4c9 1881
187b041e
SM
1882 if (tp->inf->displaced_step_state.unavailable)
1883 {
1884 /* The arch told us to not even try preparing another displaced step
1885 for this inferior. Just leave the thread in THREADS_TO_STEP, it
1886 will get moved to the global chain on scope exit. */
1887 continue;
1888 }
1889
1890 /* Remove thread from the THREADS_TO_STEP chain. If anything goes wrong
1891 while we try to prepare the displaced step, we don't add it back to
1892 the global step over chain. This is to avoid a thread staying in the
1893 step over chain indefinitely if something goes wrong when resuming it.
1894 If the error is intermittent and it still needs a step over, it will
1895 get enqueued again when we try to resume it normally. */
1896 thread_step_over_chain_remove (&threads_to_step, tp);
c2829269 1897
372316f1
PA
1898 step_what = thread_still_needs_step_over (tp);
1899 must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
1900 || ((step_what & STEP_OVER_BREAKPOINT)
3fc8eb30 1901 && !use_displaced_stepping (tp)));
372316f1
PA
1902
1903 /* We currently stop all threads of all processes to step-over
1904 in-line. If we need to start a new in-line step-over, let
1905 any pending displaced steps finish first. */
187b041e
SM
1906 if (must_be_in_line && displaced_step_in_progress_any_thread ())
1907 {
1908 global_thread_step_over_chain_enqueue (tp);
1909 continue;
1910 }
c2829269 1911
372316f1
PA
1912 if (tp->control.trap_expected
1913 || tp->resumed
1914 || tp->executing)
ad53cd71 1915 {
4d9d9d04
PA
1916 internal_error (__FILE__, __LINE__,
1917 "[%s] has inconsistent state: "
372316f1 1918 "trap_expected=%d, resumed=%d, executing=%d\n",
a068643d 1919 target_pid_to_str (tp->ptid).c_str (),
4d9d9d04 1920 tp->control.trap_expected,
372316f1 1921 tp->resumed,
4d9d9d04 1922 tp->executing);
ad53cd71 1923 }
1c5cfe86 1924
1eb8556f
SM
1925 infrun_debug_printf ("resuming [%s] for step-over",
1926 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04
PA
1927
1928 /* keep_going_pass_signal skips the step-over if the breakpoint
1929 is no longer inserted. In all-stop, we want to keep looking
1930 for a thread that needs a step-over instead of resuming TP,
1931 because we wouldn't be able to resume anything else until the
1932 target stops again. In non-stop, the resume always resumes
1933 only TP, so it's OK to let the thread resume freely. */
fbea99ea 1934 if (!target_is_non_stop_p () && !step_what)
4d9d9d04 1935 continue;
8550d3b3 1936
00431a78 1937 switch_to_thread (tp);
4d9d9d04
PA
1938 reset_ecs (ecs, tp);
1939 keep_going_pass_signal (ecs);
1c5cfe86 1940
4d9d9d04
PA
1941 if (!ecs->wait_some_more)
1942 error (_("Command aborted."));
1c5cfe86 1943
187b041e
SM
1944 /* If the thread's step over could not be initiated because no buffers
1945 were available, it was re-added to the global step over chain. */
1946 if (tp->resumed)
1947 {
1948 infrun_debug_printf ("[%s] was resumed.",
1949 target_pid_to_str (tp->ptid).c_str ());
1950 gdb_assert (!thread_is_in_step_over_chain (tp));
1951 }
1952 else
1953 {
1954 infrun_debug_printf ("[%s] was NOT resumed.",
1955 target_pid_to_str (tp->ptid).c_str ());
1956 gdb_assert (thread_is_in_step_over_chain (tp));
1957 }
372316f1
PA
1958
1959 /* If we started a new in-line step-over, we're done. */
1960 if (step_over_info_valid_p ())
1961 {
1962 gdb_assert (tp->control.trap_expected);
187b041e
SM
1963 started = true;
1964 break;
372316f1
PA
1965 }
1966
fbea99ea 1967 if (!target_is_non_stop_p ())
4d9d9d04
PA
1968 {
1969 /* On all-stop, shouldn't have resumed unless we needed a
1970 step over. */
1971 gdb_assert (tp->control.trap_expected
1972 || tp->step_after_step_resume_breakpoint);
1973
1974 /* With remote targets (at least), in all-stop, we can't
1975 issue any further remote commands until the program stops
1976 again. */
187b041e
SM
1977 started = true;
1978 break;
1c5cfe86 1979 }
c2829269 1980
4d9d9d04
PA
1981 /* Either the thread no longer needed a step-over, or a new
1982 displaced stepping sequence started. Even in the latter
1983 case, continue looking. Maybe we can also start another
1984 displaced step on a thread of another process. */
237fc4c9 1985 }
4d9d9d04 1986
187b041e 1987 return started;
237fc4c9
PA
1988}
1989
5231c1fd
PA
1990/* Update global variables holding ptids to hold NEW_PTID if they were
1991 holding OLD_PTID. */
1992static void
b161a60d
SM
1993infrun_thread_ptid_changed (process_stratum_target *target,
1994 ptid_t old_ptid, ptid_t new_ptid)
5231c1fd 1995{
b161a60d
SM
1996 if (inferior_ptid == old_ptid
1997 && current_inferior ()->process_target () == target)
5231c1fd 1998 inferior_ptid = new_ptid;
5231c1fd
PA
1999}
2000
237fc4c9 2001\f
c906108c 2002
53904c9e
AC
2003static const char schedlock_off[] = "off";
2004static const char schedlock_on[] = "on";
2005static const char schedlock_step[] = "step";
f2665db5 2006static const char schedlock_replay[] = "replay";
40478521 2007static const char *const scheduler_enums[] = {
ef346e04
AC
2008 schedlock_off,
2009 schedlock_on,
2010 schedlock_step,
f2665db5 2011 schedlock_replay,
ef346e04
AC
2012 NULL
2013};
f2665db5 2014static const char *scheduler_mode = schedlock_replay;
920d2a44
AC
2015static void
2016show_scheduler_mode (struct ui_file *file, int from_tty,
2017 struct cmd_list_element *c, const char *value)
2018{
3e43a32a
MS
2019 fprintf_filtered (file,
2020 _("Mode for locking scheduler "
2021 "during execution is \"%s\".\n"),
920d2a44
AC
2022 value);
2023}
c906108c
SS
2024
2025static void
eb4c3f4a 2026set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
c906108c 2027{
8a3ecb79 2028 if (!target_can_lock_scheduler ())
eefe576e
AC
2029 {
2030 scheduler_mode = schedlock_off;
2031 error (_("Target '%s' cannot support this command."), target_shortname);
2032 }
c906108c
SS
2033}
2034
d4db2f36
PA
2035/* True if execution commands resume all threads of all processes by
2036 default; otherwise, resume only threads of the current inferior
2037 process. */
491144b5 2038bool sched_multi = false;
d4db2f36 2039
2facfe5c 2040 /* Try to set up for software single stepping over the specified location.
c4464ade 2041 Return true if target_resume() should use hardware single step.
2facfe5c
DD
2042
2043 GDBARCH the current gdbarch.
2044 PC the location to step over. */
2045
c4464ade 2046static bool
2facfe5c
DD
2047maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2048{
c4464ade 2049 bool hw_step = true;
2facfe5c 2050
f02253f1 2051 if (execution_direction == EXEC_FORWARD
93f9a11f
YQ
2052 && gdbarch_software_single_step_p (gdbarch))
2053 hw_step = !insert_single_step_breakpoints (gdbarch);
2054
2facfe5c
DD
2055 return hw_step;
2056}
c906108c 2057
f3263aa4
PA
2058/* See infrun.h. */
2059
09cee04b
PA
2060ptid_t
2061user_visible_resume_ptid (int step)
2062{
f3263aa4 2063 ptid_t resume_ptid;
09cee04b 2064
09cee04b
PA
2065 if (non_stop)
2066 {
2067 /* With non-stop mode on, threads are always handled
2068 individually. */
2069 resume_ptid = inferior_ptid;
2070 }
2071 else if ((scheduler_mode == schedlock_on)
03d46957 2072 || (scheduler_mode == schedlock_step && step))
09cee04b 2073 {
f3263aa4
PA
2074 /* User-settable 'scheduler' mode requires solo thread
2075 resume. */
09cee04b
PA
2076 resume_ptid = inferior_ptid;
2077 }
f2665db5
MM
2078 else if ((scheduler_mode == schedlock_replay)
2079 && target_record_will_replay (minus_one_ptid, execution_direction))
2080 {
2081 /* User-settable 'scheduler' mode requires solo thread resume in replay
2082 mode. */
2083 resume_ptid = inferior_ptid;
2084 }
f3263aa4
PA
2085 else if (!sched_multi && target_supports_multi_process ())
2086 {
2087 /* Resume all threads of the current process (and none of other
2088 processes). */
e99b03dc 2089 resume_ptid = ptid_t (inferior_ptid.pid ());
f3263aa4
PA
2090 }
2091 else
2092 {
2093 /* Resume all threads of all processes. */
2094 resume_ptid = RESUME_ALL;
2095 }
09cee04b
PA
2096
2097 return resume_ptid;
2098}
2099
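/* Illustration (hypothetical session): with "set non-stop off" and
   "set scheduler-locking step", a "next" command reaches this with
   STEP=1 and gets just inferior_ptid, so only the current thread is
   resumed, while a plain "continue" (STEP=0) falls through and, on a
   target that supports multiple processes, resumes every thread of the
   current process; with "set schedule-multiple on" the final branch
   yields RESUME_ALL instead, i.e. all threads of all processes.  */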
5b6d1e4f
PA
2100/* See infrun.h. */
2101
2102process_stratum_target *
2103user_visible_resume_target (ptid_t resume_ptid)
2104{
2105 return (resume_ptid == minus_one_ptid && sched_multi
2106 ? NULL
2107 : current_inferior ()->process_target ());
2108}
2109
fbea99ea
PA
2110/* Return a ptid representing the set of threads that we will resume,
2111 in the perspective of the target, assuming run control handling
2112 does not require leaving some threads stopped (e.g., stepping past
2113 breakpoint). USER_STEP indicates whether we're about to start the
2114 target for a stepping command. */
2115
2116static ptid_t
2117internal_resume_ptid (int user_step)
2118{
2119 /* In non-stop, we always control threads individually. Note that
2120 the target may always work in non-stop mode even with "set
2121 non-stop off", in which case user_visible_resume_ptid could
2122 return a wildcard ptid. */
2123 if (target_is_non_stop_p ())
2124 return inferior_ptid;
2125 else
2126 return user_visible_resume_ptid (user_step);
2127}
2128
64ce06e4
PA
2129/* Wrapper for target_resume, that handles infrun-specific
2130 bookkeeping. */
2131
2132static void
c4464ade 2133do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
64ce06e4
PA
2134{
2135 struct thread_info *tp = inferior_thread ();
2136
c65d6b55
PA
2137 gdb_assert (!tp->stop_requested);
2138
64ce06e4 2139 /* Install inferior's terminal modes. */
223ffa71 2140 target_terminal::inferior ();
64ce06e4
PA
2141
2142 /* Avoid confusing the next resume, if the next stop/resume
2143 happens to apply to another thread. */
2144 tp->suspend.stop_signal = GDB_SIGNAL_0;
2145
8f572e5c
PA
2146 /* Advise target which signals may be handled silently.
2147
2148 If we have removed breakpoints because we are stepping over one
2149 in-line (in any thread), we need to receive all signals to avoid
2150 accidentally skipping a breakpoint during execution of a signal
2151 handler.
2152
2153 Likewise if we're displaced stepping, otherwise a trap for a
2154 breakpoint in a signal handler might be confused with the
7def77a1 2155 displaced step finishing. We don't make the displaced_step_finish
8f572e5c
PA
2156 step distinguish the cases instead, because:
2157
2158 - a backtrace while stopped in the signal handler would show the
2159 scratch pad as frame older than the signal handler, instead of
2160 the real mainline code.
2161
2162 - when the thread is later resumed, the signal handler would
2163 return to the scratch pad area, which would no longer be
2164 valid. */
2165 if (step_over_info_valid_p ()
00431a78 2166 || displaced_step_in_progress (tp->inf))
adc6a863 2167 target_pass_signals ({});
64ce06e4 2168 else
adc6a863 2169 target_pass_signals (signal_pass);
64ce06e4
PA
2170
2171 target_resume (resume_ptid, step, sig);
85ad3aaf
PA
2172
2173 target_commit_resume ();
5b6d1e4f
PA
2174
2175 if (target_can_async_p ())
2176 target_async (1);
64ce06e4
PA
2177}
2178
d930703d 2179/* Resume the inferior. SIG is the signal to give the inferior
71d378ae
PA
2180 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2181 call 'resume', which handles exceptions. */
c906108c 2182
71d378ae
PA
2183static void
2184resume_1 (enum gdb_signal sig)
c906108c 2185{
515630c5 2186 struct regcache *regcache = get_current_regcache ();
ac7936df 2187 struct gdbarch *gdbarch = regcache->arch ();
4e1c45ea 2188 struct thread_info *tp = inferior_thread ();
8b86c959 2189 const address_space *aspace = regcache->aspace ();
b0f16a3e 2190 ptid_t resume_ptid;
856e7dd6
PA
2191 /* This represents the user's step vs continue request. When
2192 deciding whether "set scheduler-locking step" applies, it's the
2193 user's intention that counts. */
2194 const int user_step = tp->control.stepping_command;
64ce06e4
PA
2195 /* This represents what we'll actually request the target to do.
2196 This can decay from a step to a continue, if e.g., we need to
2197 implement single-stepping with breakpoints (software
2198 single-step). */
c4464ade 2199 bool step;
c7e8a53c 2200
c65d6b55 2201 gdb_assert (!tp->stop_requested);
c2829269
PA
2202 gdb_assert (!thread_is_in_step_over_chain (tp));
2203
372316f1
PA
2204 if (tp->suspend.waitstatus_pending_p)
2205 {
1eb8556f
SM
2206 infrun_debug_printf
2207 ("thread %s has pending wait "
2208 "status %s (currently_stepping=%d).",
2209 target_pid_to_str (tp->ptid).c_str (),
2210 target_waitstatus_to_string (&tp->suspend.waitstatus).c_str (),
2211 currently_stepping (tp));
372316f1 2212
5b6d1e4f 2213 tp->inf->process_target ()->threads_executing = true;
719546c4 2214 tp->resumed = true;
372316f1
PA
2215
2216 /* FIXME: What should we do if we are supposed to resume this
2217 thread with a signal? Maybe we should maintain a queue of
2218 pending signals to deliver. */
2219 if (sig != GDB_SIGNAL_0)
2220 {
fd7dcb94 2221 warning (_("Couldn't deliver signal %s to %s."),
a068643d
TT
2222 gdb_signal_to_name (sig),
2223 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2224 }
2225
2226 tp->suspend.stop_signal = GDB_SIGNAL_0;
372316f1
PA
2227
2228 if (target_can_async_p ())
9516f85a
AB
2229 {
2230 target_async (1);
2231 /* Tell the event loop we have an event to process. */
2232 mark_async_event_handler (infrun_async_inferior_event_token);
2233 }
372316f1
PA
2234 return;
2235 }
2236
2237 tp->stepped_breakpoint = 0;
2238
6b403daa
PA
2239 /* Depends on stepped_breakpoint. */
2240 step = currently_stepping (tp);
2241
74609e71
YQ
2242 if (current_inferior ()->waiting_for_vfork_done)
2243 {
48f9886d
PA
2244 /* Don't try to single-step a vfork parent that is waiting for
2245 the child to get out of the shared memory region (by exec'ing
2246 or exiting). This is particularly important on software
2247 single-step archs, as the child process would trip on the
2248 software single step breakpoint inserted for the parent
2249 process. Since the parent will not actually execute any
2250 instruction until the child is out of the shared region (such
2251 are vfork's semantics), it is safe to simply continue it.
2252 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2253 the parent, and tell it to `keep_going', which automatically
2254 re-sets it stepping. */
1eb8556f 2255 infrun_debug_printf ("resume : clear step");
c4464ade 2256 step = false;
74609e71
YQ
2257 }
2258
7ca9b62a
TBA
2259 CORE_ADDR pc = regcache_read_pc (regcache);
2260
1eb8556f
SM
2261 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2262 "current thread [%s] at %s",
2263 step, gdb_signal_to_symbol_string (sig),
2264 tp->control.trap_expected,
2265 target_pid_to_str (inferior_ptid).c_str (),
2266 paddress (gdbarch, pc));
c906108c 2267
c2c6d25f
JM
2268 /* Normally, by the time we reach `resume', the breakpoints are either
2269 removed or inserted, as appropriate. The exception is if we're sitting
2270 at a permanent breakpoint; we need to step over it, but permanent
2271 breakpoints can't be removed. So we have to test for it here. */
6c95b8df 2272 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
6d350bb5 2273 {
af48d08f
PA
2274 if (sig != GDB_SIGNAL_0)
2275 {
2276 /* We have a signal to pass to the inferior. The resume
2277 may, or may not take us to the signal handler. If this
2278 is a step, we'll need to stop in the signal handler, if
2279 there's one, (if the target supports stepping into
2280 handlers), or in the next mainline instruction, if
2281 there's no handler. If this is a continue, we need to be
2282 sure to run the handler with all breakpoints inserted.
2283 In all cases, set a breakpoint at the current address
2284 (where the handler returns to), and once that breakpoint
2285 is hit, resume skipping the permanent breakpoint. If
2286 that breakpoint isn't hit, then we've stepped into the
2287 signal handler (or hit some other event). We'll delete
2288 the step-resume breakpoint then. */
2289
1eb8556f
SM
2290 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2291 "deliver signal first");
af48d08f
PA
2292
2293 clear_step_over_info ();
2294 tp->control.trap_expected = 0;
2295
2296 if (tp->control.step_resume_breakpoint == NULL)
2297 {
2298 /* Set a "high-priority" step-resume, as we don't want
2299 user breakpoints at PC to trigger (again) when this
2300 hits. */
2301 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2302 gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);
2303
2304 tp->step_after_step_resume_breakpoint = step;
2305 }
2306
2307 insert_breakpoints ();
2308 }
2309 else
2310 {
2311 /* There's no signal to pass, we can go ahead and skip the
2312 permanent breakpoint manually. */
1eb8556f 2313 infrun_debug_printf ("skipping permanent breakpoint");
af48d08f
PA
2314 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2315 /* Update pc to reflect the new address from which we will
2316 execute instructions. */
2317 pc = regcache_read_pc (regcache);
2318
2319 if (step)
2320 {
2321 /* We've already advanced the PC, so the stepping part
2322 is done. Now we need to arrange for a trap to be
2323 reported to handle_inferior_event. Set a breakpoint
2324 at the current PC, and run to it. Don't update
2325 prev_pc, because if we end in
44a1ee51
PA
2326 switch_back_to_stepped_thread, we want the "expected
2327 thread advanced also" branch to be taken. IOW, we
2328 don't want this thread to step further from PC
af48d08f 2329 (overstep). */
1ac806b8 2330 gdb_assert (!step_over_info_valid_p ());
af48d08f
PA
2331 insert_single_step_breakpoint (gdbarch, aspace, pc);
2332 insert_breakpoints ();
2333
fbea99ea 2334 resume_ptid = internal_resume_ptid (user_step);
c4464ade 2335 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
719546c4 2336 tp->resumed = true;
af48d08f
PA
2337 return;
2338 }
2339 }
6d350bb5 2340 }
c2c6d25f 2341
c1e36e3e
PA
2342 /* If we have a breakpoint to step over, make sure to do a single
2343 step only. Same if we have software watchpoints. */
2344 if (tp->control.trap_expected || bpstat_should_step ())
2345 tp->control.may_range_step = 0;
2346
7da6a5b9
LM
2347 /* If displaced stepping is enabled, step over breakpoints by executing a
2348 copy of the instruction at a different address.
237fc4c9
PA
2349
2350 We can't use displaced stepping when we have a signal to deliver;
2351 the comments for displaced_step_prepare explain why. The
2352 comments in handle_inferior_event for dealing with 'random
74609e71
YQ
2353 signals' explain what we do instead.
2354
2355 We can't use displaced stepping while we are waiting for a vfork_done
2356 event either; displaced stepping would break the vfork child in much
2357 the same way a software single-step breakpoint does. */
3fc8eb30
PA
2358 if (tp->control.trap_expected
2359 && use_displaced_stepping (tp)
cb71640d 2360 && !step_over_info_valid_p ()
a493e3e2 2361 && sig == GDB_SIGNAL_0
74609e71 2362 && !current_inferior ()->waiting_for_vfork_done)
237fc4c9 2363 {
bab37966
SM
2364 displaced_step_prepare_status prepare_status
2365 = displaced_step_prepare (tp);
fc1cf338 2366
bab37966 2367 if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
d56b7306 2368 {
1eb8556f 2369 infrun_debug_printf ("Got placed in step-over queue");
4d9d9d04
PA
2370
2371 tp->control.trap_expected = 0;
d56b7306
VP
2372 return;
2373 }
bab37966 2374 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
3fc8eb30
PA
2375 {
2376 /* Fallback to stepping over the breakpoint in-line. */
2377
2378 if (target_is_non_stop_p ())
2379 stop_all_threads ();
2380
a01bda52 2381 set_step_over_info (regcache->aspace (),
21edc42f 2382 regcache_read_pc (regcache), 0, tp->global_num);
3fc8eb30
PA
2383
2384 step = maybe_software_singlestep (gdbarch, pc);
2385
2386 insert_breakpoints ();
2387 }
bab37966 2388 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
3fc8eb30 2389 {
3fc8eb30
PA
2390 /* Update pc to reflect the new address from which we will
2391 execute instructions due to displaced stepping. */
00431a78 2392 pc = regcache_read_pc (get_thread_regcache (tp));
ca7781d2 2393
40a53766 2394 step = gdbarch_displaced_step_hw_singlestep (gdbarch);
3fc8eb30 2395 }
bab37966
SM
2396 else
2397 gdb_assert_not_reached (_("Invalid displaced_step_prepare_status "
2398 "value."));
237fc4c9
PA
2399 }
2400
2facfe5c 2401 /* Do we need to do it the hard way, w/temp breakpoints? */
99e40580 2402 else if (step)
2facfe5c 2403 step = maybe_software_singlestep (gdbarch, pc);
c906108c 2404
30852783
UW
2405 /* Currently, our software single-step implementation leads to different
2406 results than hardware single-stepping in one situation: when stepping
2407 into delivering a signal which has an associated signal handler,
2408 hardware single-step will stop at the first instruction of the handler,
2409 while software single-step will simply skip execution of the handler.
2410
2411 For now, this difference in behavior is accepted since there is no
2412 easy way to actually implement single-stepping into a signal handler
2413 without kernel support.
2414
2415 However, there is one scenario where this difference leads to follow-on
2416 problems: if we're stepping off a breakpoint by removing all breakpoints
2417 and then single-stepping. In this case, the software single-step
2418 behavior means that even if there is a *breakpoint* in the signal
2419 handler, GDB still would not stop.
2420
2421 Fortunately, we can at least fix this particular issue. We detect
2422 here the case where we are about to deliver a signal while software
2423 single-stepping with breakpoints removed. In this situation, we
2424 revert the decisions to remove all breakpoints and insert single-
2425 step breakpoints, and instead we install a step-resume breakpoint
2426 at the current address, deliver the signal without stepping, and
2427 once we arrive back at the step-resume breakpoint, actually step
2428 over the breakpoint we originally wanted to step over. */
34b7e8a6 2429 if (thread_has_single_step_breakpoints_set (tp)
6cc83d2a
PA
2430 && sig != GDB_SIGNAL_0
2431 && step_over_info_valid_p ())
30852783
UW
2432 {
2433 /* If we have nested signals or a pending signal is delivered
7da6a5b9 2434 immediately after a handler returns, we might already have
30852783
UW
2435 a step-resume breakpoint set on the earlier handler. We cannot
2436 set another step-resume breakpoint; just continue on until the
2437 original breakpoint is hit. */
2438 if (tp->control.step_resume_breakpoint == NULL)
2439 {
2c03e5be 2440 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
30852783
UW
2441 tp->step_after_step_resume_breakpoint = 1;
2442 }
2443
34b7e8a6 2444 delete_single_step_breakpoints (tp);
30852783 2445
31e77af2 2446 clear_step_over_info ();
30852783 2447 tp->control.trap_expected = 0;
31e77af2
PA
2448
2449 insert_breakpoints ();
30852783
UW
2450 }
2451
b0f16a3e
SM
2452 /* If STEP is set, it's a request to use hardware stepping
2453 facilities. But in that case, we should never
2454 use singlestep breakpoint. */
34b7e8a6 2455 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
dfcd3bfb 2456
fbea99ea 2457 /* Decide the set of threads to ask the target to resume. */
1946c4cc 2458 if (tp->control.trap_expected)
b0f16a3e
SM
2459 {
2460 /* We're allowing a thread to run past a breakpoint it has
1946c4cc
YQ
2461 hit, either by single-stepping the thread with the breakpoint
2462 removed, or by displaced stepping, with the breakpoint inserted.
2463 In the former case, we need to single-step only this thread,
2464 and keep others stopped, as they can miss this breakpoint if
2465 allowed to run. That's not really a problem for displaced
2466 stepping, but, we still keep other threads stopped, in case
2467 another thread is also stopped for a breakpoint waiting for
2468 its turn in the displaced stepping queue. */
b0f16a3e
SM
2469 resume_ptid = inferior_ptid;
2470 }
fbea99ea
PA
2471 else
2472 resume_ptid = internal_resume_ptid (user_step);
d4db2f36 2473
7f5ef605
PA
2474 if (execution_direction != EXEC_REVERSE
2475 && step && breakpoint_inserted_here_p (aspace, pc))
b0f16a3e 2476 {
372316f1
PA
2477 /* There are two cases where we currently need to step a
2478 breakpoint instruction when we have a signal to deliver:
2479
2480 - See handle_signal_stop where we handle random signals that
2481 could take us out of the stepping range. Normally, in
2482 that case we end up continuing (instead of stepping) over the
7f5ef605
PA
2483 signal handler with a breakpoint at PC, but there are cases
2484 where we should _always_ single-step, even if we have a
2485 step-resume breakpoint, like when a software watchpoint is
2486 set. Assuming single-stepping and delivering a signal at the
2487 same time would take us to the signal handler, then we could
2488 have removed the breakpoint at PC to step over it. However,
2489 some hardware step targets (like e.g., Mac OS) can't step
2490 into signal handlers, and for those, we need to leave the
2491 breakpoint at PC inserted, as otherwise if the handler
2492 recurses and executes PC again, it'll miss the breakpoint.
2493 So we leave the breakpoint inserted anyway, but we need to
2494 record that we tried to step a breakpoint instruction, so
372316f1
PA
2495 that adjust_pc_after_break doesn't end up confused.
2496
dda83cd7 2497 - In non-stop if we insert a breakpoint (e.g., a step-resume)
372316f1
PA
2498 in one thread after another thread that was stepping had been
2499 momentarily paused for a step-over. When we re-resume the
2500 stepping thread, it may be resumed from that address with a
2501 breakpoint that hasn't trapped yet. Seen with
2502 gdb.threads/non-stop-fair-events.exp, on targets that don't
2503 do displaced stepping. */
2504
1eb8556f
SM
2505 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2506 target_pid_to_str (tp->ptid).c_str ());
7f5ef605
PA
2507
2508 tp->stepped_breakpoint = 1;
2509
b0f16a3e
SM
2510 /* Most targets can step a breakpoint instruction, thus
2511 executing it normally. But if this one cannot, just
2512 continue and we will hit it anyway. */
7f5ef605 2513 if (gdbarch_cannot_step_breakpoint (gdbarch))
c4464ade 2514 step = false;
b0f16a3e 2515 }
ef5cf84e 2516
b0f16a3e 2517 if (debug_displaced
cb71640d 2518 && tp->control.trap_expected
3fc8eb30 2519 && use_displaced_stepping (tp)
cb71640d 2520 && !step_over_info_valid_p ())
b0f16a3e 2521 {
00431a78 2522 struct regcache *resume_regcache = get_thread_regcache (tp);
ac7936df 2523 struct gdbarch *resume_gdbarch = resume_regcache->arch ();
b0f16a3e
SM
2524 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2525 gdb_byte buf[4];
2526
b0f16a3e 2527 read_memory (actual_pc, buf, sizeof (buf));
136821d9
SM
2528 displaced_debug_printf ("run %s: %s",
2529 paddress (resume_gdbarch, actual_pc),
2530 displaced_step_dump_bytes
2531 (buf, sizeof (buf)).c_str ());
b0f16a3e 2532 }
237fc4c9 2533
b0f16a3e
SM
2534 if (tp->control.may_range_step)
2535 {
2536 /* If we're resuming a thread with the PC out of the step
2537 range, then we're doing some nested/finer run control
2538 operation, like stepping the thread out of the dynamic
2539 linker or the displaced stepping scratch pad. We
2540 shouldn't have allowed a range step then. */
2541 gdb_assert (pc_in_thread_step_range (pc, tp));
2542 }
c1e36e3e 2543
64ce06e4 2544 do_target_resume (resume_ptid, step, sig);
719546c4 2545 tp->resumed = true;
c906108c 2546}
71d378ae
PA
2547
2548/* Resume the inferior. SIG is the signal to give the inferior
2549 (GDB_SIGNAL_0 for none). This is a wrapper around 'resume_1' that
2550 rolls back state on error. */
2551
aff4e175 2552static void
71d378ae
PA
2553resume (gdb_signal sig)
2554{
a70b8144 2555 try
71d378ae
PA
2556 {
2557 resume_1 (sig);
2558 }
230d2906 2559 catch (const gdb_exception &ex)
71d378ae
PA
2560 {
2561 /* If resuming is being aborted for any reason, delete any
2562 single-step breakpoint resume_1 may have created, to avoid
2563 confusing the following resumption, and to avoid leaving
2564 single-step breakpoints perturbing other threads, in case
2565 we're running in non-stop mode. */
2566 if (inferior_ptid != null_ptid)
2567 delete_single_step_breakpoints (inferior_thread ());
eedc3f4f 2568 throw;
71d378ae 2569 }
71d378ae
PA
2570}
2571
c906108c 2572\f
237fc4c9 2573/* Proceeding. */
c906108c 2574
4c2f2a79
PA
2575/* See infrun.h. */
2576
2577/* Counter that tracks number of user visible stops. This can be used
2578 to tell whether a command has proceeded the inferior past the
2579 current location. This allows e.g., inferior function calls in
2580 breakpoint commands to not interrupt the command list. When the
2581 call finishes successfully, the inferior is standing at the same
2582 breakpoint as if nothing happened (and so we don't call
2583 normal_stop). */
2584static ULONGEST current_stop_id;
2585
2586/* See infrun.h. */
2587
2588ULONGEST
2589get_stop_id (void)
2590{
2591 return current_stop_id;
2592}
2593
2594/* Called when we report a user visible stop. */
2595
2596static void
2597new_stop_id (void)
2598{
2599 current_stop_id++;
2600}
2601
c906108c
SS
2602/* Clear out all variables saying what to do when inferior is continued.
2603 First do this, then set the ones you want, then call `proceed'. */
2604
a7212384
UW
2605static void
2606clear_proceed_status_thread (struct thread_info *tp)
c906108c 2607{
1eb8556f 2608 infrun_debug_printf ("%s", target_pid_to_str (tp->ptid).c_str ());
d6b48e9c 2609
372316f1
PA
2610 /* If we're starting a new sequence, then the previous finished
2611 single-step is no longer relevant. */
2612 if (tp->suspend.waitstatus_pending_p)
2613 {
2614 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
2615 {
1eb8556f
SM
2616 infrun_debug_printf ("pending event of %s was a finished step. "
2617 "Discarding.",
2618 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
2619
2620 tp->suspend.waitstatus_pending_p = 0;
2621 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
2622 }
1eb8556f 2623 else
372316f1 2624 {
1eb8556f
SM
2625 infrun_debug_printf
2626 ("thread %s has pending wait status %s (currently_stepping=%d).",
2627 target_pid_to_str (tp->ptid).c_str (),
2628 target_waitstatus_to_string (&tp->suspend.waitstatus).c_str (),
2629 currently_stepping (tp));
372316f1
PA
2630 }
2631 }
2632
70509625
PA
2633 /* If this signal should not be seen by program, give it zero.
2634 Used for debugging signals. */
2635 if (!signal_pass_state (tp->suspend.stop_signal))
2636 tp->suspend.stop_signal = GDB_SIGNAL_0;
2637
46e3ed7f 2638 delete tp->thread_fsm;
243a9253
PA
2639 tp->thread_fsm = NULL;
2640
16c381f0
JK
2641 tp->control.trap_expected = 0;
2642 tp->control.step_range_start = 0;
2643 tp->control.step_range_end = 0;
c1e36e3e 2644 tp->control.may_range_step = 0;
16c381f0
JK
2645 tp->control.step_frame_id = null_frame_id;
2646 tp->control.step_stack_frame_id = null_frame_id;
2647 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
885eeb5b 2648 tp->control.step_start_function = NULL;
a7212384 2649 tp->stop_requested = 0;
4e1c45ea 2650
16c381f0 2651 tp->control.stop_step = 0;
32400beb 2652
16c381f0 2653 tp->control.proceed_to_finish = 0;
414c69f7 2654
856e7dd6 2655 tp->control.stepping_command = 0;
17b2616c 2656
a7212384 2657 /* Discard any remaining commands or status from previous stop. */
16c381f0 2658 bpstat_clear (&tp->control.stop_bpstat);
a7212384 2659}
32400beb 2660
a7212384 2661void
70509625 2662clear_proceed_status (int step)
a7212384 2663{
f2665db5
MM
2664 /* With scheduler-locking replay, stop replaying other threads if we're
2665 not replaying the user-visible resume ptid.
2666
2667 This is a convenience feature to not require the user to explicitly
2668 stop replaying the other threads. We're assuming that the user's
2669 intent is to resume tracing the recorded process. */
2670 if (!non_stop && scheduler_mode == schedlock_replay
2671 && target_record_is_replaying (minus_one_ptid)
2672 && !target_record_will_replay (user_visible_resume_ptid (step),
2673 execution_direction))
2674 target_record_stop_replaying ();
2675
08036331 2676 if (!non_stop && inferior_ptid != null_ptid)
6c95b8df 2677 {
08036331 2678 ptid_t resume_ptid = user_visible_resume_ptid (step);
5b6d1e4f
PA
2679 process_stratum_target *resume_target
2680 = user_visible_resume_target (resume_ptid);
70509625
PA
2681
2682 /* In all-stop mode, delete the per-thread status of all threads
2683 we're about to resume, implicitly and explicitly. */
5b6d1e4f 2684 for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
08036331 2685 clear_proceed_status_thread (tp);
6c95b8df
PA
2686 }
2687
d7e15655 2688 if (inferior_ptid != null_ptid)
a7212384
UW
2689 {
2690 struct inferior *inferior;
2691
2692 if (non_stop)
2693 {
6c95b8df
PA
2694 /* If in non-stop mode, only delete the per-thread status of
2695 the current thread. */
a7212384
UW
2696 clear_proceed_status_thread (inferior_thread ());
2697 }
6c95b8df 2698
d6b48e9c 2699 inferior = current_inferior ();
16c381f0 2700 inferior->control.stop_soon = NO_STOP_QUIETLY;
4e1c45ea
PA
2701 }
2702
76727919 2703 gdb::observers::about_to_proceed.notify ();
c906108c
SS
2704}
2705
99619bea
PA
2706/* Returns true if TP is still stopped at a breakpoint that needs
2707 stepping-over in order to make progress. If the breakpoint is gone
2708 meanwhile, we can skip the whole step-over dance. */
ea67f13b 2709
c4464ade 2710static bool
6c4cfb24 2711thread_still_needs_step_over_bp (struct thread_info *tp)
99619bea
PA
2712{
2713 if (tp->stepping_over_breakpoint)
2714 {
00431a78 2715 struct regcache *regcache = get_thread_regcache (tp);
99619bea 2716
a01bda52 2717 if (breakpoint_here_p (regcache->aspace (),
af48d08f
PA
2718 regcache_read_pc (regcache))
2719 == ordinary_breakpoint_here)
c4464ade 2720 return true;
99619bea
PA
2721
2722 tp->stepping_over_breakpoint = 0;
2723 }
2724
c4464ade 2725 return false;
99619bea
PA
2726}
2727
6c4cfb24
PA
2728/* Check whether thread TP still needs to start a step-over in order
2729 to make progress when resumed. Returns a bitwise or of enum
2730 step_over_what bits, indicating what needs to be stepped over. */
2731
8d297bbf 2732static step_over_what
6c4cfb24
PA
2733thread_still_needs_step_over (struct thread_info *tp)
2734{
8d297bbf 2735 step_over_what what = 0;
6c4cfb24
PA
2736
2737 if (thread_still_needs_step_over_bp (tp))
2738 what |= STEP_OVER_BREAKPOINT;
2739
2740 if (tp->stepping_over_watchpoint
9aed480c 2741 && !target_have_steppable_watchpoint ())
6c4cfb24
PA
2742 what |= STEP_OVER_WATCHPOINT;
2743
2744 return what;
2745}
2746
483805cf
PA
2747 /* Returns true if scheduler locking applies to thread TP, taking into
2748 account whether TP is running a step/next-like command. */
2749
c4464ade 2750static bool
856e7dd6 2751schedlock_applies (struct thread_info *tp)
483805cf
PA
2752{
2753 return (scheduler_mode == schedlock_on
2754 || (scheduler_mode == schedlock_step
f2665db5
MM
2755 && tp->control.stepping_command)
2756 || (scheduler_mode == schedlock_replay
2757 && target_record_will_replay (minus_one_ptid,
2758 execution_direction)));
483805cf
PA
2759}
2760
5b6d1e4f
PA
2761/* Calls target_commit_resume on all targets. */
2762
2763static void
2764commit_resume_all_targets ()
2765{
2766 scoped_restore_current_thread restore_thread;
2767
2768 /* Map between process_target and a representative inferior. This
2769 is to avoid committing a resume in the same target more than
2770 once. Resumptions must be idempotent, so this is an
2771 optimization. */
2772 std::unordered_map<process_stratum_target *, inferior *> conn_inf;
2773
2774 for (inferior *inf : all_non_exited_inferiors ())
2775 if (inf->has_execution ())
2776 conn_inf[inf->process_target ()] = inf;
2777
2778 for (const auto &ci : conn_inf)
2779 {
2780 inferior *inf = ci.second;
2781 switch_to_inferior_no_thread (inf);
2782 target_commit_resume ();
2783 }
2784}
2785
2f4fcf00
PA
2786/* Check that all the targets we're about to resume are in non-stop
2787 mode. Ideally, we'd only care whether all targets support
2788 target-async, but we're not there yet. E.g., stop_all_threads
2789 doesn't know how to handle all-stop targets. Also, the remote
2790 protocol in all-stop mode is synchronous, irrespective of
2791 target-async, which means that things like a breakpoint re-set
2792 triggered by one target would try to read memory from all targets
2793 and fail. */
2794
2795static void
2796check_multi_target_resumption (process_stratum_target *resume_target)
2797{
2798 if (!non_stop && resume_target == nullptr)
2799 {
2800 scoped_restore_current_thread restore_thread;
2801
2802 /* This is used to track whether we're resuming more than one
2803 target. */
2804 process_stratum_target *first_connection = nullptr;
2805
2806 /* The first inferior we see with a target that does not work in
2807 always-non-stop mode. */
2808 inferior *first_not_non_stop = nullptr;
2809
2810 for (inferior *inf : all_non_exited_inferiors (resume_target))
2811 {
2812 switch_to_inferior_no_thread (inf);
2813
55f6301a 2814 if (!target_has_execution ())
2f4fcf00
PA
2815 continue;
2816
2817 process_stratum_target *proc_target
2818 = current_inferior ()->process_target();
2819
2820 if (!target_is_non_stop_p ())
2821 first_not_non_stop = inf;
2822
2823 if (first_connection == nullptr)
2824 first_connection = proc_target;
2825 else if (first_connection != proc_target
2826 && first_not_non_stop != nullptr)
2827 {
2828 switch_to_inferior_no_thread (first_not_non_stop);
2829
2830 proc_target = current_inferior ()->process_target();
2831
2832 error (_("Connection %d (%s) does not support "
2833 "multi-target resumption."),
2834 proc_target->connection_number,
2835 make_target_connection_string (proc_target).c_str ());
2836 }
2837 }
2838 }
2839}
2840
c906108c
SS
2841/* Basic routine for continuing the program in various fashions.
2842
2843 ADDR is the address to resume at, or -1 for resume where stopped.
aff4e175
AB
2844 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
2845 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
c906108c
SS
2846
2847 You should call clear_proceed_status before calling proceed. */
2848
2849void
64ce06e4 2850proceed (CORE_ADDR addr, enum gdb_signal siggnal)
c906108c 2851{
e58b0e63
PA
2852 struct regcache *regcache;
2853 struct gdbarch *gdbarch;
e58b0e63 2854 CORE_ADDR pc;
4d9d9d04
PA
2855 struct execution_control_state ecss;
2856 struct execution_control_state *ecs = &ecss;
c4464ade 2857 bool started;
c906108c 2858
e58b0e63
PA
2859 /* If we're stopped at a fork/vfork, follow the branch set by the
2860 "set follow-fork-mode" command; otherwise, we'll just proceed
2861 resuming the current thread. */
2862 if (!follow_fork ())
2863 {
2864 /* The target for some reason decided not to resume. */
2865 normal_stop ();
f148b27e 2866 if (target_can_async_p ())
b1a35af2 2867 inferior_event_handler (INF_EXEC_COMPLETE);
e58b0e63
PA
2868 return;
2869 }
2870
842951eb
PA
2871 /* We'll update this if & when we switch to a new thread. */
2872 previous_inferior_ptid = inferior_ptid;
2873
e58b0e63 2874 regcache = get_current_regcache ();
ac7936df 2875 gdbarch = regcache->arch ();
8b86c959
YQ
2876 const address_space *aspace = regcache->aspace ();
2877
fc75c28b
TBA
2878 pc = regcache_read_pc_protected (regcache);
2879
08036331 2880 thread_info *cur_thr = inferior_thread ();
e58b0e63 2881
99619bea 2882 /* Fill in with reasonable starting values. */
08036331 2883 init_thread_stepping_state (cur_thr);
99619bea 2884
08036331 2885 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
c2829269 2886
5b6d1e4f
PA
2887 ptid_t resume_ptid
2888 = user_visible_resume_ptid (cur_thr->control.stepping_command);
2889 process_stratum_target *resume_target
2890 = user_visible_resume_target (resume_ptid);
2891
2f4fcf00
PA
2892 check_multi_target_resumption (resume_target);
2893
2acceee2 2894 if (addr == (CORE_ADDR) -1)
c906108c 2895 {
08036331 2896 if (pc == cur_thr->suspend.stop_pc
af48d08f 2897 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
b2175913 2898 && execution_direction != EXEC_REVERSE)
3352ef37
AC
2899 /* There is a breakpoint at the address we will resume at,
2900 step one instruction before inserting breakpoints so that
2901 we do not stop right away (and report a second hit at this
b2175913
MS
2902 breakpoint).
2903
2904 Note, we don't do this in reverse, because we won't
2905 actually be executing the breakpoint insn anyway.
2906 We'll be (un-)executing the previous instruction. */
08036331 2907 cur_thr->stepping_over_breakpoint = 1;
515630c5
UW
2908 else if (gdbarch_single_step_through_delay_p (gdbarch)
2909 && gdbarch_single_step_through_delay (gdbarch,
2910 get_current_frame ()))
3352ef37
AC
2911 /* We stepped onto an instruction that needs to be stepped
2912 again before re-inserting the breakpoint, do so. */
08036331 2913 cur_thr->stepping_over_breakpoint = 1;
c906108c
SS
2914 }
2915 else
2916 {
515630c5 2917 regcache_write_pc (regcache, addr);
c906108c
SS
2918 }
2919
70509625 2920 if (siggnal != GDB_SIGNAL_DEFAULT)
08036331 2921 cur_thr->suspend.stop_signal = siggnal;
70509625 2922
4d9d9d04
PA
2923 /* If an exception is thrown from this point on, make sure to
2924 propagate GDB's knowledge of the executing state to the
2925 frontend/user running state. */
5b6d1e4f 2926 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
4d9d9d04
PA
2927
2928 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
2929 threads (e.g., we might need to set threads stepping over
2930 breakpoints first), from the user/frontend's point of view, all
2931 threads in RESUME_PTID are now running. Unless we're calling an
2932 inferior function, as in that case we pretend the inferior
2933 doesn't run at all. */
08036331 2934 if (!cur_thr->control.in_infcall)
719546c4 2935 set_running (resume_target, resume_ptid, true);
17b2616c 2936
1eb8556f
SM
2937 infrun_debug_printf ("addr=%s, signal=%s", paddress (gdbarch, addr),
2938 gdb_signal_to_symbol_string (siggnal));
527159b7 2939
4d9d9d04
PA
2940 annotate_starting ();
2941
2942 /* Make sure that output from GDB appears before output from the
2943 inferior. */
2944 gdb_flush (gdb_stdout);
2945
d930703d
PA
2946 /* Since we've marked the inferior running, give it the terminal. A
2947 QUIT/Ctrl-C from here on is forwarded to the target (which can
2948 still detect attempts to unblock a stuck connection with repeated
2949 Ctrl-C from within target_pass_ctrlc). */
2950 target_terminal::inferior ();
2951
4d9d9d04
PA
2952 /* In a multi-threaded task we may select another thread and
2953 then continue or step.
2954
2955 But if a thread that we're resuming had stopped at a breakpoint,
2956 it will immediately cause another breakpoint stop without any
2957 execution (i.e. it will report a breakpoint hit incorrectly). So
2958 we must step over it first.
2959
 2960 Look for threads other than the current (CUR_THR) that reported a
 2961 breakpoint hit and haven't been resumed since. */
2962
2963 /* If scheduler locking applies, we can avoid iterating over all
2964 threads. */
08036331 2965 if (!non_stop && !schedlock_applies (cur_thr))
94cc34af 2966 {
5b6d1e4f
PA
2967 for (thread_info *tp : all_non_exited_threads (resume_target,
2968 resume_ptid))
08036331 2969 {
f3f8ece4
PA
2970 switch_to_thread_no_regs (tp);
2971
4d9d9d04
PA
2972 /* Ignore the current thread here. It's handled
2973 afterwards. */
08036331 2974 if (tp == cur_thr)
4d9d9d04 2975 continue;
c906108c 2976
4d9d9d04
PA
2977 if (!thread_still_needs_step_over (tp))
2978 continue;
2979
2980 gdb_assert (!thread_is_in_step_over_chain (tp));
c906108c 2981
1eb8556f
SM
2982 infrun_debug_printf ("need to step-over [%s] first",
2983 target_pid_to_str (tp->ptid).c_str ());
99619bea 2984
28d5518b 2985 global_thread_step_over_chain_enqueue (tp);
2adfaa28 2986 }
f3f8ece4
PA
2987
2988 switch_to_thread (cur_thr);
30852783
UW
2989 }
2990
4d9d9d04
PA
2991 /* Enqueue the current thread last, so that we move all other
2992 threads over their breakpoints first. */
08036331 2993 if (cur_thr->stepping_over_breakpoint)
28d5518b 2994 global_thread_step_over_chain_enqueue (cur_thr);
30852783 2995
4d9d9d04
PA
2996 /* If the thread isn't started, we'll still need to set its prev_pc,
2997 so that switch_back_to_stepped_thread knows the thread hasn't
2998 advanced. Must do this before resuming any thread, as in
2999 all-stop/remote, once we resume we can't send any other packet
3000 until the target stops again. */
fc75c28b 3001 cur_thr->prev_pc = regcache_read_pc_protected (regcache);
99619bea 3002
a9bc57b9
TT
3003 {
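    /* While this block runs, target commit-resume is deferred: the
       individual resumptions requested below are batched up and only
       sent to the targets by commit_resume_all_targets afterwards.  */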
3004 scoped_restore save_defer_tc = make_scoped_defer_target_commit_resume ();
85ad3aaf 3005
a9bc57b9 3006 started = start_step_over ();
c906108c 3007
a9bc57b9
TT
3008 if (step_over_info_valid_p ())
3009 {
3010 /* Either this thread started a new in-line step over, or some
3011 other thread was already doing one. In either case, don't
3012 resume anything else until the step-over is finished. */
3013 }
3014 else if (started && !target_is_non_stop_p ())
3015 {
3016 /* A new displaced stepping sequence was started. In all-stop,
3017 we can't talk to the target anymore until it next stops. */
3018 }
3019 else if (!non_stop && target_is_non_stop_p ())
3020 {
 3021 /* We are in all-stop mode, but the target is always in non-stop mode.
3022 Start all other threads that are implicitly resumed too. */
5b6d1e4f
PA
3023 for (thread_info *tp : all_non_exited_threads (resume_target,
3024 resume_ptid))
3025 {
3026 switch_to_thread_no_regs (tp);
3027
f9fac3c8
SM
3028 if (!tp->inf->has_execution ())
3029 {
1eb8556f
SM
3030 infrun_debug_printf ("[%s] target has no execution",
3031 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3032 continue;
3033 }
f3f8ece4 3034
f9fac3c8
SM
3035 if (tp->resumed)
3036 {
1eb8556f
SM
3037 infrun_debug_printf ("[%s] resumed",
3038 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3039 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
3040 continue;
3041 }
fbea99ea 3042
f9fac3c8
SM
3043 if (thread_is_in_step_over_chain (tp))
3044 {
1eb8556f
SM
3045 infrun_debug_printf ("[%s] needs step-over",
3046 target_pid_to_str (tp->ptid).c_str ());
f9fac3c8
SM
3047 continue;
3048 }
fbea99ea 3049
1eb8556f 3050 infrun_debug_printf ("resuming %s",
dda83cd7 3051 target_pid_to_str (tp->ptid).c_str ());
fbea99ea 3052
f9fac3c8
SM
3053 reset_ecs (ecs, tp);
3054 switch_to_thread (tp);
3055 keep_going_pass_signal (ecs);
3056 if (!ecs->wait_some_more)
3057 error (_("Command aborted."));
3058 }
a9bc57b9 3059 }
08036331 3060 else if (!cur_thr->resumed && !thread_is_in_step_over_chain (cur_thr))
a9bc57b9
TT
3061 {
3062 /* The thread wasn't started, and isn't queued, run it now. */
08036331
PA
3063 reset_ecs (ecs, cur_thr);
3064 switch_to_thread (cur_thr);
a9bc57b9
TT
3065 keep_going_pass_signal (ecs);
3066 if (!ecs->wait_some_more)
3067 error (_("Command aborted."));
3068 }
3069 }
c906108c 3070
5b6d1e4f 3071 commit_resume_all_targets ();
85ad3aaf 3072
731f534f 3073 finish_state.release ();
c906108c 3074
873657b9
PA
3075 /* If we've switched threads above, switch back to the previously
3076 current thread. We don't want the user to see a different
3077 selected thread. */
3078 switch_to_thread (cur_thr);
3079
0b333c5e
PA
3080 /* Tell the event loop to wait for it to stop. If the target
3081 supports asynchronous execution, it'll do this from within
3082 target_resume. */
362646f5 3083 if (!target_can_async_p ())
0b333c5e 3084 mark_async_event_handler (infrun_async_inferior_event_token);
c906108c 3085}
c906108c
SS
3086\f
3087
3088/* Start remote-debugging of a machine over a serial link. */
96baa820 3089
c906108c 3090void
8621d6a9 3091start_remote (int from_tty)
c906108c 3092{
5b6d1e4f
PA
3093 inferior *inf = current_inferior ();
3094 inf->control.stop_soon = STOP_QUIETLY_REMOTE;
43ff13b4 3095
1777feb0 3096 /* Always go on waiting for the target, regardless of the mode. */
6426a772 3097 /* FIXME: cagney/1999-09-23: At present it isn't possible to
7e73cedf 3098 indicate to wait_for_inferior that a target should timeout if
6426a772
JM
3099 nothing is returned (instead of just blocking). Because of this,
3100 targets expecting an immediate response need to, internally, set
3101 things up so that the target_wait() is forced to eventually
1777feb0 3102 timeout. */
6426a772
JM
3103 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
3104 differentiate to its caller what the state of the target is after
3105 the initial open has been performed. Here we're assuming that
3106 the target has stopped. It should be possible to eventually have
3107 target_open() return to the caller an indication that the target
3108 is currently running and GDB state should be set to the same as
1777feb0 3109 for an async run. */
5b6d1e4f 3110 wait_for_inferior (inf);
8621d6a9
DJ
3111
3112 /* Now that the inferior has stopped, do any bookkeeping like
3113 loading shared libraries. We want to do this before normal_stop,
3114 so that the displayed frame is up to date. */
a7aba266 3115 post_create_inferior (from_tty);
8621d6a9 3116
6426a772 3117 normal_stop ();
c906108c
SS
3118}
3119
3120/* Initialize static vars when a new inferior begins. */
3121
3122void
96baa820 3123init_wait_for_inferior (void)
c906108c
SS
3124{
3125 /* These are meaningless until the first time through wait_for_inferior. */
c906108c 3126
c906108c
SS
3127 breakpoint_init_inferior (inf_starting);
3128
70509625 3129 clear_proceed_status (0);
9f976b41 3130
ab1ddbcf 3131 nullify_last_target_wait_ptid ();
237fc4c9 3132
842951eb 3133 previous_inferior_ptid = inferior_ptid;
c906108c 3134}
237fc4c9 3135
c906108c 3136\f
488f131b 3137
ec9499be 3138static void handle_inferior_event (struct execution_control_state *ecs);
cd0fc7c3 3139
568d6575
UW
3140static void handle_step_into_function (struct gdbarch *gdbarch,
3141 struct execution_control_state *ecs);
3142static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3143 struct execution_control_state *ecs);
4f5d7f63 3144static void handle_signal_stop (struct execution_control_state *ecs);
186c406b 3145static void check_exception_resume (struct execution_control_state *,
28106bc2 3146 struct frame_info *);
611c83ae 3147
bdc36728 3148static void end_stepping_range (struct execution_control_state *ecs);
22bcd14b 3149static void stop_waiting (struct execution_control_state *ecs);
d4f3574e 3150static void keep_going (struct execution_control_state *ecs);
94c57d6a 3151static void process_event_stop_test (struct execution_control_state *ecs);
c4464ade 3152static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
104c1213 3153
252fbfc8
PA
3154/* This function is attached as a "thread_stop_requested" observer.
3155 Cleanup local state that assumed the PTID was to be resumed, and
3156 report the stop to the frontend. */
3157
2c0b251b 3158static void
252fbfc8
PA
3159infrun_thread_stop_requested (ptid_t ptid)
3160{
5b6d1e4f
PA
3161 process_stratum_target *curr_target = current_inferior ()->process_target ();
3162
c65d6b55
PA
3163 /* PTID was requested to stop. If the thread was already stopped,
3164 but the user/frontend doesn't know about that yet (e.g., the
3165 thread had been temporarily paused for some step-over), set up
3166 for reporting the stop now. */
5b6d1e4f 3167 for (thread_info *tp : all_threads (curr_target, ptid))
08036331
PA
3168 {
3169 if (tp->state != THREAD_RUNNING)
3170 continue;
3171 if (tp->executing)
3172 continue;
c65d6b55 3173
08036331
PA
3174 /* Remove matching threads from the step-over queue, so
3175 start_step_over doesn't try to resume them
3176 automatically. */
3177 if (thread_is_in_step_over_chain (tp))
28d5518b 3178 global_thread_step_over_chain_remove (tp);
c65d6b55 3179
08036331
PA
3180 /* If the thread is stopped, but the user/frontend doesn't
3181 know about that yet, queue a pending event, as if the
3182 thread had just stopped now. Unless the thread already had
3183 a pending event. */
3184 if (!tp->suspend.waitstatus_pending_p)
3185 {
3186 tp->suspend.waitstatus_pending_p = 1;
3187 tp->suspend.waitstatus.kind = TARGET_WAITKIND_STOPPED;
3188 tp->suspend.waitstatus.value.sig = GDB_SIGNAL_0;
3189 }
c65d6b55 3190
08036331
PA
3191 /* Clear the inline-frame state, since we're re-processing the
3192 stop. */
5b6d1e4f 3193 clear_inline_frame_state (tp);
c65d6b55 3194
08036331
PA
3195 /* If this thread was paused because some other thread was
3196 doing an inline-step over, let that finish first. Once
3197 that happens, we'll restart all threads and consume pending
3198 stop events then. */
3199 if (step_over_info_valid_p ())
3200 continue;
3201
3202 /* Otherwise we can process the (new) pending event now. Set
3203 it so this pending event is considered by
3204 do_target_wait. */
719546c4 3205 tp->resumed = true;
08036331 3206 }
252fbfc8
PA
3207}
3208
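/* If the exiting thread TP is the one we last reported a wait event
   for, forget that cached target/ptid so later code does not act on
   stale state.  */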
a07daef3
PA
3209static void
3210infrun_thread_thread_exit (struct thread_info *tp, int silent)
3211{
5b6d1e4f
PA
3212 if (target_last_proc_target == tp->inf->process_target ()
3213 && target_last_wait_ptid == tp->ptid)
a07daef3
PA
3214 nullify_last_target_wait_ptid ();
3215}
3216
0cbcdb96
PA
3217/* Delete the step resume, single-step and longjmp/exception resume
3218 breakpoints of TP. */
4e1c45ea 3219
0cbcdb96
PA
3220static void
3221delete_thread_infrun_breakpoints (struct thread_info *tp)
4e1c45ea 3222{
0cbcdb96
PA
3223 delete_step_resume_breakpoint (tp);
3224 delete_exception_resume_breakpoint (tp);
34b7e8a6 3225 delete_single_step_breakpoints (tp);
4e1c45ea
PA
3226}
3227
0cbcdb96
PA
3228/* If the target still has execution, call FUNC for each thread that
3229 just stopped. In all-stop, that's all the non-exited threads; in
3230 non-stop, that's the current thread, only. */
3231
3232typedef void (*for_each_just_stopped_thread_callback_func)
3233 (struct thread_info *tp);
4e1c45ea
PA
3234
3235static void
0cbcdb96 3236for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
4e1c45ea 3237{
55f6301a 3238 if (!target_has_execution () || inferior_ptid == null_ptid)
4e1c45ea
PA
3239 return;
3240
fbea99ea 3241 if (target_is_non_stop_p ())
4e1c45ea 3242 {
0cbcdb96
PA
3243 /* If in non-stop mode, only the current thread stopped. */
3244 func (inferior_thread ());
4e1c45ea
PA
3245 }
3246 else
0cbcdb96 3247 {
0cbcdb96 3248 /* In all-stop mode, all threads have stopped. */
08036331
PA
3249 for (thread_info *tp : all_non_exited_threads ())
3250 func (tp);
0cbcdb96
PA
3251 }
3252}
3253
3254/* Delete the step resume and longjmp/exception resume breakpoints of
3255 the threads that just stopped. */
3256
3257static void
3258delete_just_stopped_threads_infrun_breakpoints (void)
3259{
3260 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
34b7e8a6
PA
3261}
3262
3263/* Delete the single-step breakpoints of the threads that just
3264 stopped. */
7c16b83e 3265
34b7e8a6
PA
3266static void
3267delete_just_stopped_threads_single_step_breakpoints (void)
3268{
3269 for_each_just_stopped_thread (delete_single_step_breakpoints);
4e1c45ea
PA
3270}
3271
221e1a37 3272/* See infrun.h. */
223698f8 3273
221e1a37 3274void
223698f8
DE
3275print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3276 const struct target_waitstatus *ws)
3277{
23fdd69e 3278 std::string status_string = target_waitstatus_to_string (ws);
d7e74731 3279 string_file stb;
223698f8
DE
3280
3281 /* The text is split over several lines because it was getting too long.
3282 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3283 output as a unit; we want only one timestamp printed if debug_timestamp
3284 is set. */
3285
1eb8556f 3286 stb.printf ("[infrun] target_wait (%d.%ld.%ld",
e99b03dc 3287 waiton_ptid.pid (),
e38504b3 3288 waiton_ptid.lwp (),
cc6bcb54 3289 waiton_ptid.tid ());
e99b03dc 3290 if (waiton_ptid.pid () != -1)
a068643d 3291 stb.printf (" [%s]", target_pid_to_str (waiton_ptid).c_str ());
d7e74731 3292 stb.printf (", status) =\n");
1eb8556f 3293 stb.printf ("[infrun] %d.%ld.%ld [%s],\n",
e99b03dc 3294 result_ptid.pid (),
e38504b3 3295 result_ptid.lwp (),
cc6bcb54 3296 result_ptid.tid (),
a068643d 3297 target_pid_to_str (result_ptid).c_str ());
1eb8556f 3298 stb.printf ("[infrun] %s\n", status_string.c_str ());
223698f8
DE
3299
3300 /* This uses %s in part to handle %'s in the text, but also to avoid
3301 a gcc error: the format attribute requires a string literal. */
d7e74731 3302 fprintf_unfiltered (gdb_stdlog, "%s", stb.c_str ());
223698f8
DE
3303}
3304
372316f1
PA
3305/* Select a thread at random, out of those which are resumed and have
3306 had events. */
3307
3308static struct thread_info *
5b6d1e4f 3309random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
372316f1 3310{
372316f1 3311 int num_events = 0;
08036331 3312
5b6d1e4f 3313 auto has_event = [&] (thread_info *tp)
08036331 3314 {
5b6d1e4f
PA
3315 return (tp->ptid.matches (waiton_ptid)
3316 && tp->resumed
08036331
PA
3317 && tp->suspend.waitstatus_pending_p);
3318 };
372316f1
PA
3319
3320 /* First see how many events we have. Count only resumed threads
3321 that have an event pending. */
5b6d1e4f 3322 for (thread_info *tp : inf->non_exited_threads ())
08036331 3323 if (has_event (tp))
372316f1
PA
3324 num_events++;
3325
3326 if (num_events == 0)
3327 return NULL;
3328
3329 /* Now randomly pick a thread out of those that have had events. */
08036331
PA
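  /* Scale rand () down to an index in [0, NUM_EVENTS), so each
     pending event is chosen with roughly equal probability.  */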
3330 int random_selector = (int) ((num_events * (double) rand ())
3331 / (RAND_MAX + 1.0));
372316f1 3332
1eb8556f
SM
3333 if (num_events > 1)
3334 infrun_debug_printf ("Found %d events, selecting #%d",
3335 num_events, random_selector);
372316f1
PA
3336
3337 /* Select the Nth thread that has had an event. */
5b6d1e4f 3338 for (thread_info *tp : inf->non_exited_threads ())
08036331 3339 if (has_event (tp))
372316f1 3340 if (random_selector-- == 0)
08036331 3341 return tp;
372316f1 3342
08036331 3343 gdb_assert_not_reached ("event thread not found");
372316f1
PA
3344}
3345
3346/* Wrapper for target_wait that first checks whether threads have
3347 pending statuses to report before actually asking the target for
5b6d1e4f
PA
3348 more events. INF is the inferior we're using to call target_wait
3349 on. */
372316f1
PA
3350
3351static ptid_t
5b6d1e4f 3352do_target_wait_1 (inferior *inf, ptid_t ptid,
b60cea74 3353 target_waitstatus *status, target_wait_flags options)
372316f1
PA
3354{
3355 ptid_t event_ptid;
3356 struct thread_info *tp;
3357
24ed6739
AB
3358 /* We know that we are looking for an event in the target of inferior
3359 INF, but we don't know which thread the event might come from. As
3360 such we want to make sure that INFERIOR_PTID is reset so that none of
3361 the wait code relies on it - doing so is always a mistake. */
3362 switch_to_inferior_no_thread (inf);
3363
372316f1
PA
3364 /* First check if there is a resumed thread with a wait status
3365 pending. */
d7e15655 3366 if (ptid == minus_one_ptid || ptid.is_pid ())
372316f1 3367 {
5b6d1e4f 3368 tp = random_pending_event_thread (inf, ptid);
372316f1
PA
3369 }
3370 else
3371 {
1eb8556f
SM
3372 infrun_debug_printf ("Waiting for specific thread %s.",
3373 target_pid_to_str (ptid).c_str ());
372316f1
PA
3374
3375 /* We have a specific thread to check. */
5b6d1e4f 3376 tp = find_thread_ptid (inf, ptid);
372316f1
PA
3377 gdb_assert (tp != NULL);
3378 if (!tp->suspend.waitstatus_pending_p)
3379 tp = NULL;
3380 }
3381
3382 if (tp != NULL
3383 && (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3384 || tp->suspend.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
3385 {
00431a78 3386 struct regcache *regcache = get_thread_regcache (tp);
ac7936df 3387 struct gdbarch *gdbarch = regcache->arch ();
372316f1
PA
3388 CORE_ADDR pc;
3389 int discard = 0;
3390
3391 pc = regcache_read_pc (regcache);
3392
3393 if (pc != tp->suspend.stop_pc)
3394 {
1eb8556f
SM
3395 infrun_debug_printf ("PC of %s changed. was=%s, now=%s",
3396 target_pid_to_str (tp->ptid).c_str (),
3397 paddress (gdbarch, tp->suspend.stop_pc),
3398 paddress (gdbarch, pc));
372316f1
PA
3399 discard = 1;
3400 }
a01bda52 3401 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
372316f1 3402 {
1eb8556f
SM
3403 infrun_debug_printf ("previous breakpoint of %s, at %s gone",
3404 target_pid_to_str (tp->ptid).c_str (),
3405 paddress (gdbarch, pc));
372316f1
PA
3406
3407 discard = 1;
3408 }
3409
3410 if (discard)
3411 {
1eb8556f
SM
3412 infrun_debug_printf ("pending event of %s cancelled.",
3413 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3414
3415 tp->suspend.waitstatus.kind = TARGET_WAITKIND_SPURIOUS;
3416 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3417 }
3418 }
3419
3420 if (tp != NULL)
3421 {
1eb8556f
SM
3422 infrun_debug_printf ("Using pending wait status %s for %s.",
3423 target_waitstatus_to_string
3424 (&tp->suspend.waitstatus).c_str (),
3425 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
3426
3427 /* Now that we've selected our final event LWP, un-adjust its PC
3428 if it was a software breakpoint (and the target doesn't
3429 always adjust the PC itself). */
3430 if (tp->suspend.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3431 && !target_supports_stopped_by_sw_breakpoint ())
3432 {
3433 struct regcache *regcache;
3434 struct gdbarch *gdbarch;
3435 int decr_pc;
3436
00431a78 3437 regcache = get_thread_regcache (tp);
ac7936df 3438 gdbarch = regcache->arch ();
372316f1
PA
3439
3440 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3441 if (decr_pc != 0)
3442 {
3443 CORE_ADDR pc;
3444
3445 pc = regcache_read_pc (regcache);
3446 regcache_write_pc (regcache, pc + decr_pc);
3447 }
3448 }
3449
3450 tp->suspend.stop_reason = TARGET_STOPPED_BY_NO_REASON;
3451 *status = tp->suspend.waitstatus;
3452 tp->suspend.waitstatus_pending_p = 0;
3453
3454 /* Wake up the event loop again, until all pending events are
3455 processed. */
3456 if (target_is_async_p ())
3457 mark_async_event_handler (infrun_async_inferior_event_token);
3458 return tp->ptid;
3459 }
3460
3461 /* But if we don't find one, we'll have to wait. */
3462
d3a07122
SM
3463 /* We can't ask a non-async target to do a non-blocking wait, so this will be
3464 a blocking wait. */
3465 if (!target_can_async_p ())
3466 options &= ~TARGET_WNOHANG;
3467
372316f1
PA
3468 if (deprecated_target_wait_hook)
3469 event_ptid = deprecated_target_wait_hook (ptid, status, options);
3470 else
3471 event_ptid = target_wait (ptid, status, options);
3472
3473 return event_ptid;
3474}
3475
5b6d1e4f
PA
3476/* Wrapper for target_wait that first checks whether threads have
3477 pending statuses to report before actually asking the target for
b3e3a4c1 3478 more events. Polls for events from all inferiors/targets. */
5b6d1e4f
PA
3479
3480static bool
b60cea74
TT
3481do_target_wait (ptid_t wait_ptid, execution_control_state *ecs,
3482 target_wait_flags options)
5b6d1e4f
PA
3483{
3484 int num_inferiors = 0;
3485 int random_selector;
3486
b3e3a4c1
SM
3487 /* For fairness, we pick the first inferior/target to poll at random
3488 out of all inferiors that may report events, and then continue
3489 polling the rest of the inferior list starting from that one in a
3490 circular fashion until the whole list is polled once. */
5b6d1e4f
PA
3491
3492 auto inferior_matches = [&wait_ptid] (inferior *inf)
3493 {
3494 return (inf->process_target () != NULL
5b6d1e4f
PA
3495 && ptid_t (inf->pid).matches (wait_ptid));
3496 };
3497
b3e3a4c1 3498 /* First see how many matching inferiors we have. */
5b6d1e4f
PA
3499 for (inferior *inf : all_inferiors ())
3500 if (inferior_matches (inf))
3501 num_inferiors++;
3502
3503 if (num_inferiors == 0)
3504 {
3505 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3506 return false;
3507 }
3508
b3e3a4c1 3509 /* Now randomly pick an inferior out of those that matched. */
5b6d1e4f
PA
3510 random_selector = (int)
3511 ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));
3512
1eb8556f
SM
3513 if (num_inferiors > 1)
3514 infrun_debug_printf ("Found %d inferiors, starting at #%d",
3515 num_inferiors, random_selector);
5b6d1e4f 3516
b3e3a4c1 3517 /* Select the Nth inferior that matched. */
5b6d1e4f
PA
3518
3519 inferior *selected = nullptr;
3520
3521 for (inferior *inf : all_inferiors ())
3522 if (inferior_matches (inf))
3523 if (random_selector-- == 0)
3524 {
3525 selected = inf;
3526 break;
3527 }
3528
b3e3a4c1 3529 /* Now poll for events out of each of the matching inferior's
5b6d1e4f
PA
3530 targets, starting from the selected one. */
3531
3532 auto do_wait = [&] (inferior *inf)
3533 {
5b6d1e4f
PA
3534 ecs->ptid = do_target_wait_1 (inf, wait_ptid, &ecs->ws, options);
3535 ecs->target = inf->process_target ();
3536 return (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
3537 };
3538
b3e3a4c1
SM
3539 /* Needed in 'all-stop + target-non-stop' mode, because we end up
3540 here spuriously after the target is all stopped and we've already
5b6d1e4f
PA
3541 reported the stop to the user, polling for events. */
3542 scoped_restore_current_thread restore_thread;
3543
3544 int inf_num = selected->num;
3545 for (inferior *inf = selected; inf != NULL; inf = inf->next)
3546 if (inferior_matches (inf))
3547 if (do_wait (inf))
3548 return true;
3549
3550 for (inferior *inf = inferior_list;
3551 inf != NULL && inf->num < inf_num;
3552 inf = inf->next)
3553 if (inferior_matches (inf))
3554 if (do_wait (inf))
3555 return true;
3556
3557 ecs->ws.kind = TARGET_WAITKIND_IGNORE;
3558 return false;
3559}
3560
24291992
PA
3561/* Prepare and stabilize the inferior for detaching it. E.g.,
3562 detaching while a thread is displaced stepping is a recipe for
3563 crashing it, as nothing would readjust the PC out of the scratch
3564 pad. */
3565
3566void
3567prepare_for_detach (void)
3568{
3569 struct inferior *inf = current_inferior ();
f2907e49 3570 ptid_t pid_ptid = ptid_t (inf->pid);
24291992 3571
24291992
PA
3572 /* Is any thread of this process displaced stepping? If not,
3573 there's nothing else to do. */
187b041e 3574 if (displaced_step_in_progress (inf))
24291992
PA
3575 return;
3576
1eb8556f 3577 infrun_debug_printf ("displaced-stepping in progress while detaching");
24291992 3578
9bcb1f16 3579 scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);
24291992 3580
187b041e 3581 while (displaced_step_in_progress (inf))
24291992 3582 {
24291992
PA
3583 struct execution_control_state ecss;
3584 struct execution_control_state *ecs;
3585
3586 ecs = &ecss;
3587 memset (ecs, 0, sizeof (*ecs));
3588
3589 overlay_cache_invalid = 1;
f15cb84a
YQ
3590 /* Flush target cache before starting to handle each event.
3591 Target was running and cache could be stale. This is just a
3592 heuristic. Running threads may modify target memory, but we
3593 don't get any event. */
3594 target_dcache_invalidate ();
24291992 3595
5b6d1e4f 3596 do_target_wait (pid_ptid, ecs, 0);
24291992
PA
3597
3598 if (debug_infrun)
3599 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
3600
3601 /* If an error happens while handling the event, propagate GDB's
3602 knowledge of the executing state to the frontend/user running
3603 state. */
5b6d1e4f
PA
3604 scoped_finish_thread_state finish_state (inf->process_target (),
3605 minus_one_ptid);
24291992
PA
3606
 3607 /* Now figure out what to do with the result of the wait. */
3608 handle_inferior_event (ecs);
3609
3610 /* No error, don't finish the state yet. */
731f534f 3611 finish_state.release ();
24291992
PA
3612
3613 /* Breakpoints and watchpoints are not installed on the target
3614 at this point, and signals are passed directly to the
3615 inferior, so this must mean the process is gone. */
3616 if (!ecs->wait_some_more)
3617 {
9bcb1f16 3618 restore_detaching.release ();
24291992
PA
3619 error (_("Program exited while detaching"));
3620 }
3621 }
3622
9bcb1f16 3623 restore_detaching.release ();
24291992
PA
3624}
3625
cd0fc7c3 3626/* Wait for control to return from inferior to debugger.
ae123ec6 3627
cd0fc7c3
SS
3628 If inferior gets a signal, we may decide to start it up again
3629 instead of returning. That is why there is a loop in this function.
3630 When this function actually returns it means the inferior
3631 should be left stopped and GDB should read more commands. */
3632
5b6d1e4f
PA
3633static void
3634wait_for_inferior (inferior *inf)
cd0fc7c3 3635{
1eb8556f 3636 infrun_debug_printf ("wait_for_inferior ()");
527159b7 3637
4c41382a 3638 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
cd0fc7c3 3639
e6f5c25b
PA
3640 /* If an error happens while handling the event, propagate GDB's
3641 knowledge of the executing state to the frontend/user running
3642 state. */
5b6d1e4f
PA
3643 scoped_finish_thread_state finish_state
3644 (inf->process_target (), minus_one_ptid);
e6f5c25b 3645
c906108c
SS
3646 while (1)
3647 {
ae25568b
PA
3648 struct execution_control_state ecss;
3649 struct execution_control_state *ecs = &ecss;
29f49a6a 3650
ae25568b
PA
3651 memset (ecs, 0, sizeof (*ecs));
3652
ec9499be 3653 overlay_cache_invalid = 1;
ec9499be 3654
f15cb84a
YQ
3655 /* Flush target cache before starting to handle each event.
3656 Target was running and cache could be stale. This is just a
3657 heuristic. Running threads may modify target memory, but we
3658 don't get any event. */
3659 target_dcache_invalidate ();
3660
5b6d1e4f
PA
3661 ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
3662 ecs->target = inf->process_target ();
c906108c 3663
f00150c9 3664 if (debug_infrun)
5b6d1e4f 3665 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
f00150c9 3666
cd0fc7c3
SS
 3667 /* Now figure out what to do with the result of the wait. */
3668 handle_inferior_event (ecs);
c906108c 3669
cd0fc7c3
SS
3670 if (!ecs->wait_some_more)
3671 break;
3672 }
4e1c45ea 3673
e6f5c25b 3674 /* No error, don't finish the state yet. */
731f534f 3675 finish_state.release ();
cd0fc7c3 3676}
c906108c 3677
d3d4baed
PA
3678/* Cleanup that reinstalls the readline callback handler, if the
3679 target is running in the background. If while handling the target
3680 event something triggered a secondary prompt, like e.g., a
3681 pagination prompt, we'll have removed the callback handler (see
3682 gdb_readline_wrapper_line). Need to do this as we go back to the
3683 event loop, ready to process further input. Note this has no
3684 effect if the handler hasn't actually been removed, because calling
3685 rl_callback_handler_install resets the line buffer, thus losing
3686 input. */
3687
3688static void
d238133d 3689reinstall_readline_callback_handler_cleanup ()
d3d4baed 3690{
3b12939d
PA
3691 struct ui *ui = current_ui;
3692
3693 if (!ui->async)
6c400b59
PA
3694 {
3695 /* We're not going back to the top level event loop yet. Don't
3696 install the readline callback, as it'd prep the terminal,
3697 readline-style (raw, noecho) (e.g., --batch). We'll install
3698 it the next time the prompt is displayed, when we're ready
3699 for input. */
3700 return;
3701 }
3702
3b12939d 3703 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
d3d4baed
PA
3704 gdb_rl_callback_handler_reinstall ();
3705}
3706
243a9253
PA
3707/* Clean up the FSMs of threads that are now stopped. In non-stop,
3708 that's just the event thread. In all-stop, that's all threads. */
3709
3710static void
3711clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
3712{
08036331
PA
3713 if (ecs->event_thread != NULL
3714 && ecs->event_thread->thread_fsm != NULL)
46e3ed7f 3715 ecs->event_thread->thread_fsm->clean_up (ecs->event_thread);
243a9253
PA
3716
3717 if (!non_stop)
3718 {
08036331 3719 for (thread_info *thr : all_non_exited_threads ())
dda83cd7 3720 {
243a9253
PA
3721 if (thr->thread_fsm == NULL)
3722 continue;
3723 if (thr == ecs->event_thread)
3724 continue;
3725
00431a78 3726 switch_to_thread (thr);
46e3ed7f 3727 thr->thread_fsm->clean_up (thr);
243a9253
PA
3728 }
3729
3730 if (ecs->event_thread != NULL)
00431a78 3731 switch_to_thread (ecs->event_thread);
243a9253
PA
3732 }
3733}
3734
3b12939d
PA
3735/* Helper for all_uis_check_sync_execution_done that works on the
3736 current UI. */
3737
3738static void
3739check_curr_ui_sync_execution_done (void)
3740{
3741 struct ui *ui = current_ui;
3742
3743 if (ui->prompt_state == PROMPT_NEEDED
3744 && ui->async
3745 && !gdb_in_secondary_prompt_p (ui))
3746 {
223ffa71 3747 target_terminal::ours ();
76727919 3748 gdb::observers::sync_execution_done.notify ();
3eb7562a 3749 ui_register_input_event_handler (ui);
3b12939d
PA
3750 }
3751}
3752
3753/* See infrun.h. */
3754
3755void
3756all_uis_check_sync_execution_done (void)
3757{
0e454242 3758 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
3759 {
3760 check_curr_ui_sync_execution_done ();
3761 }
3762}
3763
a8836c93
PA
3764/* See infrun.h. */
3765
3766void
3767all_uis_on_sync_execution_starting (void)
3768{
0e454242 3769 SWITCH_THRU_ALL_UIS ()
a8836c93
PA
3770 {
3771 if (current_ui->prompt_state == PROMPT_NEEDED)
3772 async_disable_stdin ();
3773 }
3774}
3775
1777feb0 3776/* Asynchronous version of wait_for_inferior. It is called by the
43ff13b4 3777 event loop whenever a change of state is detected on the file
1777feb0
MS
3778 descriptor corresponding to the target. It can be called more than
 3779 once to complete a single execution command; the state that must
 3780 persist across those calls lives in the affected threads. If it is the last time
a474d7c2
PA
3781 that this function is called for a single execution command, then
3782 report to the user that the inferior has stopped, and do the
1777feb0 3783 necessary cleanups. */
43ff13b4
JM
3784
3785void
b1a35af2 3786fetch_inferior_event ()
43ff13b4 3787{
0d1e5fa7 3788 struct execution_control_state ecss;
a474d7c2 3789 struct execution_control_state *ecs = &ecss;
0f641c01 3790 int cmd_done = 0;
43ff13b4 3791
0d1e5fa7
PA
3792 memset (ecs, 0, sizeof (*ecs));
3793
c61db772
PA
3794 /* Events are always processed with the main UI as current UI. This
3795 way, warnings, debug output, etc. are always consistently sent to
3796 the main console. */
4b6749b9 3797 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
c61db772 3798
b78b3a29
TBA
3799 /* Temporarily disable pagination. Otherwise, the user would be
3800 given an option to press 'q' to quit, which would cause an early
3801 exit and could leave GDB in a half-baked state. */
3802 scoped_restore save_pagination
3803 = make_scoped_restore (&pagination_enabled, false);
3804
d3d4baed 3805 /* End up with readline processing input, if necessary. */
d238133d
TT
3806 {
3807 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
3808
3809 /* We're handling a live event, so make sure we're doing live
3810 debugging. If we're looking at traceframes while the target is
3811 running, we're going to need to get back to that mode after
3812 handling the event. */
3813 gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
3814 if (non_stop)
3815 {
3816 maybe_restore_traceframe.emplace ();
3817 set_current_traceframe (-1);
3818 }
43ff13b4 3819
873657b9
PA
3820 /* The user/frontend should not notice a thread switch due to
3821 internal events. Make sure we revert to the user selected
3822 thread and frame after handling the event and running any
3823 breakpoint commands. */
3824 scoped_restore_current_thread restore_thread;
d238133d
TT
3825
3826 overlay_cache_invalid = 1;
3827 /* Flush target cache before starting to handle each event. Target
3828 was running and cache could be stale. This is just a heuristic.
3829 Running threads may modify target memory, but we don't get any
3830 event. */
3831 target_dcache_invalidate ();
3832
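    /* Handle this event using the execution direction the target
       reports (e.g., reverse while replaying a recording), restoring
       the previous direction when done.  */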
3833 scoped_restore save_exec_dir
3834 = make_scoped_restore (&execution_direction,
3835 target_execution_direction ());
3836
5b6d1e4f
PA
3837 if (!do_target_wait (minus_one_ptid, ecs, TARGET_WNOHANG))
3838 return;
3839
3840 gdb_assert (ecs->ws.kind != TARGET_WAITKIND_IGNORE);
3841
3842 /* Switch to the target that generated the event, so we can do
7f08fd51
TBA
3843 target calls. */
3844 switch_to_target_no_thread (ecs->target);
d238133d
TT
3845
3846 if (debug_infrun)
5b6d1e4f 3847 print_target_wait_results (minus_one_ptid, ecs->ptid, &ecs->ws);
d238133d
TT
3848
3849 /* If an error happens while handling the event, propagate GDB's
3850 knowledge of the executing state to the frontend/user running
3851 state. */
3852 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
5b6d1e4f 3853 scoped_finish_thread_state finish_state (ecs->target, finish_ptid);
d238133d 3854
979a0d13 3855 /* These run before the scoped_restore_current_thread above, so they
d238133d
TT
 3856 still apply to the thread that threw the exception. */
3857 auto defer_bpstat_clear
3858 = make_scope_exit (bpstat_clear_actions);
3859 auto defer_delete_threads
3860 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
3861
 3862 /* Now figure out what to do with the result of the wait. */
3863 handle_inferior_event (ecs);
3864
3865 if (!ecs->wait_some_more)
3866 {
5b6d1e4f 3867 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
758cb810 3868 bool should_stop = true;
d238133d 3869 struct thread_info *thr = ecs->event_thread;
d6b48e9c 3870
d238133d 3871 delete_just_stopped_threads_infrun_breakpoints ();
f107f563 3872
d238133d
TT
3873 if (thr != NULL)
3874 {
3875 struct thread_fsm *thread_fsm = thr->thread_fsm;
243a9253 3876
d238133d 3877 if (thread_fsm != NULL)
46e3ed7f 3878 should_stop = thread_fsm->should_stop (thr);
d238133d 3879 }
243a9253 3880
d238133d
TT
3881 if (!should_stop)
3882 {
3883 keep_going (ecs);
3884 }
3885 else
3886 {
46e3ed7f 3887 bool should_notify_stop = true;
d238133d 3888 int proceeded = 0;
1840d81a 3889
d238133d 3890 clean_up_just_stopped_threads_fsms (ecs);
243a9253 3891
d238133d 3892 if (thr != NULL && thr->thread_fsm != NULL)
46e3ed7f 3893 should_notify_stop = thr->thread_fsm->should_notify_stop ();
388a7084 3894
d238133d
TT
3895 if (should_notify_stop)
3896 {
3897 /* We may not find an inferior if this was a process exit. */
3898 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
3899 proceeded = normal_stop ();
3900 }
243a9253 3901
d238133d
TT
3902 if (!proceeded)
3903 {
b1a35af2 3904 inferior_event_handler (INF_EXEC_COMPLETE);
d238133d
TT
3905 cmd_done = 1;
3906 }
873657b9
PA
3907
3908 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
3909 previously selected thread is gone. We have two
3910 choices - switch to no thread selected, or restore the
3911 previously selected thread (now exited). We chose the
3912 later, just because that's what GDB used to do. After
3913 this, "info threads" says "The current thread <Thread
3914 ID 2> has terminated." instead of "No thread
3915 selected.". */
3916 if (!non_stop
3917 && cmd_done
3918 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
3919 restore_thread.dont_restore ();
d238133d
TT
3920 }
3921 }
4f8d22e3 3922
d238133d
TT
3923 defer_delete_threads.release ();
3924 defer_bpstat_clear.release ();
29f49a6a 3925
d238133d
TT
3926 /* No error, don't finish the thread states yet. */
3927 finish_state.release ();
731f534f 3928
d238133d
TT
3929 /* This scope is used to ensure that readline callbacks are
3930 reinstalled here. */
3931 }
4f8d22e3 3932
3b12939d
PA
3933 /* If a UI was in sync execution mode, and now isn't, restore its
3934 prompt (a synchronous execution command has finished, and we're
3935 ready for input). */
3936 all_uis_check_sync_execution_done ();
0f641c01
PA
3937
3938 if (cmd_done
0f641c01 3939 && exec_done_display_p
00431a78
PA
3940 && (inferior_ptid == null_ptid
3941 || inferior_thread ()->state != THREAD_RUNNING))
0f641c01 3942 printf_unfiltered (_("completed.\n"));
43ff13b4
JM
3943}
3944
29734269
SM
3945/* See infrun.h. */
3946
edb3359d 3947void
29734269
SM
3948set_step_info (thread_info *tp, struct frame_info *frame,
3949 struct symtab_and_line sal)
edb3359d 3950{
29734269
SM
3951 /* This can be removed once this function no longer implicitly relies on the
3952 inferior_ptid value. */
3953 gdb_assert (inferior_ptid == tp->ptid);
edb3359d 3954
16c381f0
JK
3955 tp->control.step_frame_id = get_frame_id (frame);
3956 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
edb3359d
DJ
3957
3958 tp->current_symtab = sal.symtab;
3959 tp->current_line = sal.line;
3960}
3961
0d1e5fa7
PA
3962/* Clear context switchable stepping state. */
3963
3964void
4e1c45ea 3965init_thread_stepping_state (struct thread_info *tss)
0d1e5fa7 3966{
7f5ef605 3967 tss->stepped_breakpoint = 0;
0d1e5fa7 3968 tss->stepping_over_breakpoint = 0;
963f9c80 3969 tss->stepping_over_watchpoint = 0;
0d1e5fa7 3970 tss->step_after_step_resume_breakpoint = 0;
cd0fc7c3
SS
3971}
3972
ab1ddbcf 3973/* See infrun.h. */
c32c64b7 3974
6efcd9a8 3975void
5b6d1e4f
PA
3976set_last_target_status (process_stratum_target *target, ptid_t ptid,
3977 target_waitstatus status)
c32c64b7 3978{
5b6d1e4f 3979 target_last_proc_target = target;
c32c64b7
DE
3980 target_last_wait_ptid = ptid;
3981 target_last_waitstatus = status;
3982}
3983
ab1ddbcf 3984/* See infrun.h. */
e02bc4cc
DS
3985
3986void
5b6d1e4f
PA
3987get_last_target_status (process_stratum_target **target, ptid_t *ptid,
3988 target_waitstatus *status)
e02bc4cc 3989{
5b6d1e4f
PA
3990 if (target != nullptr)
3991 *target = target_last_proc_target;
ab1ddbcf
PA
3992 if (ptid != nullptr)
3993 *ptid = target_last_wait_ptid;
3994 if (status != nullptr)
3995 *status = target_last_waitstatus;
e02bc4cc
DS
3996}
3997
ab1ddbcf
PA
3998/* See infrun.h. */
3999
ac264b3b
MS
4000void
4001nullify_last_target_wait_ptid (void)
4002{
5b6d1e4f 4003 target_last_proc_target = nullptr;
ac264b3b 4004 target_last_wait_ptid = minus_one_ptid;
ab1ddbcf 4005 target_last_waitstatus = {};
ac264b3b
MS
4006}
4007
dcf4fbde 4008/* Switch thread contexts, making ECS's event thread the current thread. */
dd80620e
MS
4009
4010static void
00431a78 4011context_switch (execution_control_state *ecs)
dd80620e 4012{
1eb8556f 4013 if (ecs->ptid != inferior_ptid
5b6d1e4f
PA
4014 && (inferior_ptid == null_ptid
4015 || ecs->event_thread != inferior_thread ()))
fd48f117 4016 {
1eb8556f
SM
4017 infrun_debug_printf ("Switching context from %s to %s",
4018 target_pid_to_str (inferior_ptid).c_str (),
4019 target_pid_to_str (ecs->ptid).c_str ());
fd48f117
DJ
4020 }
4021
00431a78 4022 switch_to_thread (ecs->event_thread);
dd80620e
MS
4023}
4024
d8dd4d5f
PA
4025/* If the target can't tell whether we've hit breakpoints
4026 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4027 check whether that could have been caused by a breakpoint. If so,
4028 adjust the PC, per gdbarch_decr_pc_after_break. */
4029
4fa8626c 4030static void
d8dd4d5f
PA
4031adjust_pc_after_break (struct thread_info *thread,
4032 struct target_waitstatus *ws)
4fa8626c 4033{
24a73cce
UW
4034 struct regcache *regcache;
4035 struct gdbarch *gdbarch;
118e6252 4036 CORE_ADDR breakpoint_pc, decr_pc;
4fa8626c 4037
4fa8626c
DJ
4038 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
4039 we aren't, just return.
9709f61c
DJ
4040
4041 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
b798847d
UW
4042 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
4043 implemented by software breakpoints should be handled through the normal
4044 breakpoint layer.
8fb3e588 4045
4fa8626c
DJ
4046 NOTE drow/2004-01-31: On some targets, breakpoints may generate
4047 different signals (SIGILL or SIGEMT for instance), but it is less
4048 clear where the PC is pointing afterwards. It may not match
b798847d
UW
4049 gdbarch_decr_pc_after_break. I don't know any specific target that
4050 generates these signals at breakpoints (the code has been in GDB since at
4051 least 1992) so I can not guess how to handle them here.
8fb3e588 4052
e6cf7916
UW
4053 In earlier versions of GDB, a target with
4054 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
b798847d
UW
4055 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
4056 target with both of these set in GDB history, and it seems unlikely to be
4057 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
4fa8626c 4058
d8dd4d5f 4059 if (ws->kind != TARGET_WAITKIND_STOPPED)
4fa8626c
DJ
4060 return;
4061
d8dd4d5f 4062 if (ws->value.sig != GDB_SIGNAL_TRAP)
4fa8626c
DJ
4063 return;
4064
4058b839
PA
4065 /* In reverse execution, when a breakpoint is hit, the instruction
4066 under it has already been de-executed. The reported PC always
4067 points at the breakpoint address, so adjusting it further would
4068 be wrong. E.g., consider this case on a decr_pc_after_break == 1
4069 architecture:
4070
4071 B1 0x08000000 : INSN1
4072 B2 0x08000001 : INSN2
4073 0x08000002 : INSN3
4074 PC -> 0x08000003 : INSN4
4075
4076 Say you're stopped at 0x08000003 as above. Reverse continuing
4077 from that point should hit B2 as below. Reading the PC when the
4078 SIGTRAP is reported should read 0x08000001 and INSN2 should have
4079 been de-executed already.
4080
4081 B1 0x08000000 : INSN1
4082 B2 PC -> 0x08000001 : INSN2
4083 0x08000002 : INSN3
4084 0x08000003 : INSN4
4085
4086 We can't apply the same logic as for forward execution, because
4087 we would wrongly adjust the PC to 0x08000000, since there's a
4088 breakpoint at PC - 1. We'd then report a hit on B1, although
4089 INSN1 hadn't been de-executed yet. Doing nothing is the correct
4090 behaviour. */
4091 if (execution_direction == EXEC_REVERSE)
4092 return;
4093
1cf4d951
PA
4094 /* If the target can tell whether the thread hit a SW breakpoint,
4095 trust it. Targets that can tell also adjust the PC
4096 themselves. */
4097 if (target_supports_stopped_by_sw_breakpoint ())
4098 return;
4099
4100 /* Note that relying on whether a breakpoint is planted in memory to
 4101 determine this can fail. E.g., the breakpoint could have been
4102 removed since. Or the thread could have been told to step an
4103 instruction the size of a breakpoint instruction, and only
4104 _after_ was a breakpoint inserted at its address. */
4105
24a73cce
UW
4106 /* If this target does not decrement the PC after breakpoints, then
4107 we have nothing to do. */
00431a78 4108 regcache = get_thread_regcache (thread);
ac7936df 4109 gdbarch = regcache->arch ();
118e6252 4110
527a273a 4111 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
118e6252 4112 if (decr_pc == 0)
24a73cce
UW
4113 return;
4114
8b86c959 4115 const address_space *aspace = regcache->aspace ();
6c95b8df 4116
8aad930b
AC
4117 /* Find the location where (if we've hit a breakpoint) the
4118 breakpoint would be. */
118e6252 4119 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
8aad930b 4120
1cf4d951
PA
4121 /* If the target can't tell whether a software breakpoint triggered,
4122 fallback to figuring it out based on breakpoints we think were
4123 inserted in the target, and on whether the thread was stepped or
4124 continued. */
4125
1c5cfe86
PA
4126 /* Check whether there actually is a software breakpoint inserted at
4127 that location.
4128
4129 If in non-stop mode, a race condition is possible where we've
4130 removed a breakpoint, but stop events for that breakpoint were
4131 already queued and arrive later. To suppress those spurious
4132 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
1cf4d951
PA
4133 and retire them after a number of stop events are reported. Note
 4134 this is a heuristic and can thus get confused. The real fix is
4135 to get the "stopped by SW BP and needs adjustment" info out of
4136 the target/kernel (and thus never reach here; see above). */
6c95b8df 4137 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
fbea99ea
PA
4138 || (target_is_non_stop_p ()
4139 && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
8aad930b 4140 {
07036511 4141 gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;
abbb1732 4142
8213266a 4143 if (record_full_is_used ())
07036511
TT
4144 restore_operation_disable.emplace
4145 (record_full_gdb_operation_disable_set ());
96429cc8 4146
1c0fdd0e
UW
4147 /* When using hardware single-step, a SIGTRAP is reported for both
4148 a completed single-step and a software breakpoint. Need to
4149 differentiate between the two, as the latter needs adjusting
4150 but the former does not.
4151
4152 The SIGTRAP can be due to a completed hardware single-step only if
4153 - we didn't insert software single-step breakpoints
1c0fdd0e
UW
4154 - this thread is currently being stepped
4155
4156 If any of these events did not occur, we must have stopped due
4157 to hitting a software breakpoint, and have to back up to the
4158 breakpoint address.
4159
4160 As a special case, we could have hardware single-stepped a
4161 software breakpoint. In this case (prev_pc == breakpoint_pc),
4162 we also need to back up to the breakpoint address. */
4163
d8dd4d5f
PA
4164 if (thread_has_single_step_breakpoints_set (thread)
4165 || !currently_stepping (thread)
4166 || (thread->stepped_breakpoint
4167 && thread->prev_pc == breakpoint_pc))
515630c5 4168 regcache_write_pc (regcache, breakpoint_pc);
8aad930b 4169 }
4fa8626c
DJ
4170}
4171
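/* Return true if the frame identified by STEP_FRAME_ID is an outer
   frame of FRAME and every frame in between is an inline frame,
   i.e., FRAME was reached from that frame via inlined calls.  */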
c4464ade 4172static bool
edb3359d
DJ
4173stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
4174{
4175 for (frame = get_prev_frame (frame);
4176 frame != NULL;
4177 frame = get_prev_frame (frame))
4178 {
4179 if (frame_id_eq (get_frame_id (frame), step_frame_id))
c4464ade
SM
4180 return true;
4181
edb3359d
DJ
4182 if (get_frame_type (frame) != INLINE_FRAME)
4183 break;
4184 }
4185
c4464ade 4186 return false;
edb3359d
DJ
4187}
4188
4a4c04f1
BE
4189/* Look for an inline frame that is marked for skip.
4190 If PREV_FRAME is TRUE start at the previous frame,
4191 otherwise start at the current frame. Stop at the
4192 first non-inline frame, or at the frame where the
4193 step started. */
4194
4195static bool
4196inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4197{
4198 struct frame_info *frame = get_current_frame ();
4199
4200 if (prev_frame)
4201 frame = get_prev_frame (frame);
4202
4203 for (; frame != NULL; frame = get_prev_frame (frame))
4204 {
4205 const char *fn = NULL;
4206 symtab_and_line sal;
4207 struct symbol *sym;
4208
4209 if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
4210 break;
4211 if (get_frame_type (frame) != INLINE_FRAME)
4212 break;
4213
4214 sal = find_frame_sal (frame);
4215 sym = get_frame_function (frame);
4216
4217 if (sym != NULL)
4218 fn = sym->print_name ();
4219
4220 if (sal.line != 0
4221 && function_name_is_marked_for_skip (fn, sal))
4222 return true;
4223 }
4224
4225 return false;
4226}
4227
c65d6b55
PA
4228/* If the event thread has the stop requested flag set, pretend it
4229 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4230 target_stop). */
4231
4232static bool
4233handle_stop_requested (struct execution_control_state *ecs)
4234{
4235 if (ecs->event_thread->stop_requested)
4236 {
4237 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
4238 ecs->ws.value.sig = GDB_SIGNAL_0;
4239 handle_signal_stop (ecs);
4240 return true;
4241 }
4242 return false;
4243}
4244
a96d9b2e 4245/* Auxiliary function that handles syscall entry/return events.
c4464ade
SM
4246 It returns true if the inferior should keep going (and GDB
4247 should ignore the event), or false if the event deserves to be
a96d9b2e 4248 processed. */
ca2163eb 4249
c4464ade 4250static bool
ca2163eb 4251handle_syscall_event (struct execution_control_state *ecs)
a96d9b2e 4252{
ca2163eb 4253 struct regcache *regcache;
ca2163eb
PA
4254 int syscall_number;
4255
00431a78 4256 context_switch (ecs);
ca2163eb 4257
00431a78 4258 regcache = get_thread_regcache (ecs->event_thread);
f90263c1 4259 syscall_number = ecs->ws.value.syscall_number;
f2ffa92b 4260 ecs->event_thread->suspend.stop_pc = regcache_read_pc (regcache);
ca2163eb 4261
a96d9b2e
SDJ
4262 if (catch_syscall_enabled () > 0
4263 && catching_syscall_number (syscall_number) > 0)
4264 {
1eb8556f 4265 infrun_debug_printf ("syscall number=%d", syscall_number);
a96d9b2e 4266
16c381f0 4267 ecs->event_thread->control.stop_bpstat
a01bda52 4268 = bpstat_stop_status (regcache->aspace (),
f2ffa92b
PA
4269 ecs->event_thread->suspend.stop_pc,
4270 ecs->event_thread, &ecs->ws);
ab04a2af 4271
c65d6b55 4272 if (handle_stop_requested (ecs))
c4464ade 4273 return false;
c65d6b55 4274
ce12b012 4275 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
ca2163eb
PA
4276 {
4277 /* Catchpoint hit. */
c4464ade 4278 return false;
ca2163eb 4279 }
a96d9b2e 4280 }
ca2163eb 4281
c65d6b55 4282 if (handle_stop_requested (ecs))
c4464ade 4283 return false;
c65d6b55 4284
ca2163eb 4285 /* If no catchpoint triggered for this, then keep going. */
ca2163eb 4286 keep_going (ecs);
c4464ade
SM
4287
4288 return true;
a96d9b2e
SDJ
4289}
4290
7e324e48
GB
4291/* Lazily fill in the execution_control_state's stop_func_* fields. */
4292
4293static void
4294fill_in_stop_func (struct gdbarch *gdbarch,
4295 struct execution_control_state *ecs)
4296{
4297 if (!ecs->stop_func_filled_in)
4298 {
98a617f8 4299 const block *block;
fe830662 4300 const general_symbol_info *gsi;
98a617f8 4301
7e324e48
GB
4302 /* Don't care about return value; stop_func_start and stop_func_name
4303 will both be 0 if it doesn't work. */
fe830662
TT
4304 find_pc_partial_function_sym (ecs->event_thread->suspend.stop_pc,
4305 &gsi,
4306 &ecs->stop_func_start,
4307 &ecs->stop_func_end,
4308 &block);
4309 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
98a617f8
KB
4310
4311 /* The call to find_pc_partial_function, above, will set
4312 stop_func_start and stop_func_end to the start and end
4313 of the range containing the stop pc. If this range
4314 contains the entry pc for the block (which is always the
4315 case for contiguous blocks), advance stop_func_start past
4316 the function's start offset and entrypoint. Note that
4317 stop_func_start is NOT advanced when in a range of a
4318 non-contiguous block that does not contain the entry pc. */
4319 if (block != nullptr
4320 && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
4321 && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
4322 {
4323 ecs->stop_func_start
4324 += gdbarch_deprecated_function_start_offset (gdbarch);
4325
4326 if (gdbarch_skip_entrypoint_p (gdbarch))
4327 ecs->stop_func_start
4328 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
4329 }
591a12a1 4330
7e324e48
GB
4331 ecs->stop_func_filled_in = 1;
4332 }
4333}
4334
4f5d7f63 4335
00431a78 4336/* Return the STOP_SOON field of the inferior pointed at by ECS. */
4f5d7f63
PA
4337
4338static enum stop_kind
00431a78 4339get_inferior_stop_soon (execution_control_state *ecs)
4f5d7f63 4340{
5b6d1e4f 4341 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
4f5d7f63
PA
4342
4343 gdb_assert (inf != NULL);
4344 return inf->control.stop_soon;
4345}
4346
5b6d1e4f
PA
4347/* Poll for one event out of the current target. Store the resulting
4348 waitstatus in WS, and return the event ptid. Does not block. */
372316f1
PA
4349
4350static ptid_t
5b6d1e4f 4351poll_one_curr_target (struct target_waitstatus *ws)
372316f1
PA
4352{
4353 ptid_t event_ptid;
372316f1
PA
4354
4355 overlay_cache_invalid = 1;
4356
4357 /* Flush target cache before starting to handle each event.
4358 Target was running and cache could be stale. This is just a
4359 heuristic. Running threads may modify target memory, but we
4360 don't get any event. */
4361 target_dcache_invalidate ();
4362
4363 if (deprecated_target_wait_hook)
5b6d1e4f 4364 event_ptid = deprecated_target_wait_hook (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1 4365 else
5b6d1e4f 4366 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
372316f1
PA
4367
4368 if (debug_infrun)
5b6d1e4f 4369 print_target_wait_results (minus_one_ptid, event_ptid, ws);
372316f1
PA
4370
4371 return event_ptid;
4372}
4373
5b6d1e4f
PA
4374/* An event reported by wait_one. */
4375
4376struct wait_one_event
4377{
4378 /* The target the event came out of. */
4379 process_stratum_target *target;
4380
4381 /* The PTID the event was for. */
4382 ptid_t ptid;
4383
4384 /* The waitstatus. */
4385 target_waitstatus ws;
4386};
4387
4388/* Wait for one event out of any target. */
4389
4390static wait_one_event
4391wait_one ()
4392{
4393 while (1)
4394 {
4395 for (inferior *inf : all_inferiors ())
4396 {
4397 process_stratum_target *target = inf->process_target ();
4398 if (target == NULL
4399 || !target->is_async_p ()
4400 || !target->threads_executing)
4401 continue;
4402
4403 switch_to_inferior_no_thread (inf);
4404
4405 wait_one_event event;
4406 event.target = target;
4407 event.ptid = poll_one_curr_target (&event.ws);
4408
4409 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
4410 {
4411 /* If nothing is resumed, remove the target from the
4412 event loop. */
4413 target_async (0);
4414 }
4415 else if (event.ws.kind != TARGET_WAITKIND_IGNORE)
4416 return event;
4417 }
4418
4419 /* Block waiting for some event. */
4420
4421 fd_set readfds;
4422 int nfds = 0;
4423
4424 FD_ZERO (&readfds);
4425
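      /* Collect the async event file descriptor of every waitable
         target, so the select below wakes up as soon as any of them
         has an event to report.  */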
4426 for (inferior *inf : all_inferiors ())
4427 {
4428 process_stratum_target *target = inf->process_target ();
4429 if (target == NULL
4430 || !target->is_async_p ()
4431 || !target->threads_executing)
4432 continue;
4433
4434 int fd = target->async_wait_fd ();
4435 FD_SET (fd, &readfds);
4436 if (nfds <= fd)
4437 nfds = fd + 1;
4438 }
4439
4440 if (nfds == 0)
4441 {
4442 /* No waitable targets left. All must be stopped. */
4443 return {NULL, minus_one_ptid, {TARGET_WAITKIND_NO_RESUMED}};
4444 }
4445
4446 QUIT;
4447
4448 int numfds = interruptible_select (nfds, &readfds, 0, NULL, 0);
4449 if (numfds < 0)
4450 {
4451 if (errno == EINTR)
4452 continue;
4453 else
4454 perror_with_name ("interruptible_select");
4455 }
4456 }
4457}
4458
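/* Rough outline of wait_one above, for orientation (illustrative, not a
   second implementation):

     loop forever:
       for each async process target that still has executing threads:
         poll it with poll_one_curr_target (TARGET_WNOHANG);
         - TARGET_WAITKIND_NO_RESUMED -> stop asking it for async events;
         - anything other than TARGET_WAITKIND_IGNORE -> return it;
       if no target was pollable at all, return a synthesized
         TARGET_WAITKIND_NO_RESUMED ("everything already stopped");
       otherwise interruptible_select on every target's async_wait_fd,
         and go around again once one becomes readable.

   The synthesized NO_RESUMED result is how callers such as
   stop_all_threads learn that no resumed threads remain.  */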
 4459/* Save the thread's event and stop reason to process it later. */
 4460
 4461static void
 4462save_waitstatus (struct thread_info *tp, const target_waitstatus *ws)
 4463{
 4464 infrun_debug_printf ("saving status %s for %d.%ld.%ld",
 4465 target_waitstatus_to_string (ws).c_str (),
 4466 tp->ptid.pid (),
 4467 tp->ptid.lwp (),
 4468 tp->ptid.tid ());
 4469
 4470 /* Record for later. */
 4471 tp->suspend.waitstatus = *ws;
 4472 tp->suspend.waitstatus_pending_p = 1;
 4473
 4474 struct regcache *regcache = get_thread_regcache (tp);
 4475 const address_space *aspace = regcache->aspace ();
 4476
 4477 if (ws->kind == TARGET_WAITKIND_STOPPED
 4478 && ws->value.sig == GDB_SIGNAL_TRAP)
 4479 {
 4480 CORE_ADDR pc = regcache_read_pc (regcache);
 4481
 4482 adjust_pc_after_break (tp, &tp->suspend.waitstatus);
 4483
 4484 scoped_restore_current_thread restore_thread;
 4485 switch_to_thread (tp);
 4486
 4487 if (target_stopped_by_watchpoint ())
 4488 {
 4489 tp->suspend.stop_reason
 4490 = TARGET_STOPPED_BY_WATCHPOINT;
 4491 }
 4492 else if (target_supports_stopped_by_sw_breakpoint ()
 4493 && target_stopped_by_sw_breakpoint ())
 4494 {
 4495 tp->suspend.stop_reason
 4496 = TARGET_STOPPED_BY_SW_BREAKPOINT;
 4497 }
 4498 else if (target_supports_stopped_by_hw_breakpoint ()
 4499 && target_stopped_by_hw_breakpoint ())
 4500 {
 4501 tp->suspend.stop_reason
 4502 = TARGET_STOPPED_BY_HW_BREAKPOINT;
 4503 }
 4504 else if (!target_supports_stopped_by_hw_breakpoint ()
 4505 && hardware_breakpoint_inserted_here_p (aspace,
 4506 pc))
 4507 {
 4508 tp->suspend.stop_reason
 4509 = TARGET_STOPPED_BY_HW_BREAKPOINT;
 4510 }
 4511 else if (!target_supports_stopped_by_sw_breakpoint ()
 4512 && software_breakpoint_inserted_here_p (aspace,
 4513 pc))
 4514 {
 4515 tp->suspend.stop_reason
 4516 = TARGET_STOPPED_BY_SW_BREAKPOINT;
 4517 }
 4518 else if (!thread_has_single_step_breakpoints_set (tp)
 4519 && currently_stepping (tp))
 4520 {
 4521 tp->suspend.stop_reason
 4522 = TARGET_STOPPED_BY_SINGLE_STEP;
 4523 }
 4524 }
 4525}
 4526
4527/* Mark the non-executing threads accordingly. In all-stop, all
4528 threads of all processes are stopped when we get any event
4529 reported. In non-stop mode, only the event thread stops. */
4530
4531static void
4532mark_non_executing_threads (process_stratum_target *target,
4533 ptid_t event_ptid,
4534 struct target_waitstatus ws)
4535{
4536 ptid_t mark_ptid;
4537
4538 if (!target_is_non_stop_p ())
4539 mark_ptid = minus_one_ptid;
4540 else if (ws.kind == TARGET_WAITKIND_SIGNALLED
4541 || ws.kind == TARGET_WAITKIND_EXITED)
4542 {
4543 /* If we're handling a process exit in non-stop mode, even
4544 though threads haven't been deleted yet, one would think
4545 that there is nothing to do, as threads of the dead process
4546 will be soon deleted, and threads of any other process were
4547 left running. However, on some targets, threads survive a
4548 process exit event. E.g., for the "checkpoint" command,
4549 when the current checkpoint/fork exits, linux-fork.c
4550 automatically switches to another fork from within
4551 target_mourn_inferior, by associating the same
4552 inferior/thread to another fork. We haven't mourned yet at
4553 this point, but we must mark any threads left in the
4554 process as not-executing so that finish_thread_state marks
4555 them stopped (in the user's perspective) if/when we present
4556 the stop to the user. */
4557 mark_ptid = ptid_t (event_ptid.pid ());
4558 }
4559 else
4560 mark_ptid = event_ptid;
4561
4562 set_executing (target, mark_ptid, false);
4563
4564 /* Likewise the resumed flag. */
4565 set_resumed (target, mark_ptid, false);
4566}
4567
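/* Worked examples of the mark_ptid choice above (illustrative):

     - all-stop target, any event             -> minus_one_ptid, i.e. every
       thread of TARGET is marked !executing and !resumed;
     - non-stop, thread T reports a stop      -> only T's ptid;
     - non-stop, process P exits/is signalled -> ptid_t (P's pid), so any
       of P's threads that survive the exit event (see the checkpoint
       note above) are also marked stopped.  */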
 4568/* See infrun.h. */
 4569
 4570void
 4571stop_all_threads (void)
 4572{
 4573 /* We may need multiple passes to discover all threads. */
 4574 int pass;
 4575 int iterations = 0;
 4576
 4577 gdb_assert (exists_non_stop_target ());
 4578
 4579 infrun_debug_printf ("starting");
 4580
 4581 scoped_restore_current_thread restore_thread;
 4582
 4583 /* Enable thread events of all targets. */
 4584 for (auto *target : all_non_exited_process_targets ())
 4585 {
 4586 switch_to_target_no_thread (target);
 4587 target_thread_events (true);
 4588 }
 4589
 4590 SCOPE_EXIT
 4591 {
 4592 /* Disable thread events of all targets. */
 4593 for (auto *target : all_non_exited_process_targets ())
 4594 {
 4595 switch_to_target_no_thread (target);
 4596 target_thread_events (false);
 4597 }
 4598
 4599 /* Use debug_prefixed_printf directly to get a meaningful function
 4600 name. */
 4601 if (debug_infrun)
 4602 debug_prefixed_printf ("infrun", "stop_all_threads", "done");
 4603 };
 4604
 4605 /* Request threads to stop, and then wait for the stops. Because
 4606 threads we already know about can spawn more threads while we're
 4607 trying to stop them, and we only learn about new threads when we
 4608 update the thread list, do this in a loop, and keep iterating
 4609 until two passes find no threads that need to be stopped. */
 4610 for (pass = 0; pass < 2; pass++, iterations++)
 4611 {
 4612 infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
 4613 while (1)
 4614 {
 4615 int waits_needed = 0;
 4616
 4617 for (auto *target : all_non_exited_process_targets ())
 4618 {
 4619 switch_to_target_no_thread (target);
 4620 update_thread_list ();
 4621 }
 4622
 4623 /* Go through all threads looking for threads that we need
 4624 to tell the target to stop. */
 4625 for (thread_info *t : all_non_exited_threads ())
 4626 {
 4627 /* For a single-target setting with an all-stop target,
 4628 we would not even arrive here. For a multi-target
 4629 setting, until GDB is able to handle a mixture of
 4630 all-stop and non-stop targets, simply skip all-stop
 4631 targets' threads. This should be fine due to the
 4632 protection of 'check_multi_target_resumption'. */
 4633
 4634 switch_to_thread_no_regs (t);
 4635 if (!target_is_non_stop_p ())
 4636 continue;
 4637
 4638 if (t->executing)
 4639 {
 4640 /* If already stopping, don't request a stop again.
 4641 We just haven't seen the notification yet. */
 4642 if (!t->stop_requested)
 4643 {
 4644 infrun_debug_printf (" %s executing, need stop",
 4645 target_pid_to_str (t->ptid).c_str ());
 4646 target_stop (t->ptid);
 4647 t->stop_requested = 1;
 4648 }
 4649 else
 4650 {
 4651 infrun_debug_printf (" %s executing, already stopping",
 4652 target_pid_to_str (t->ptid).c_str ());
 4653 }
 4654
 4655 if (t->stop_requested)
 4656 waits_needed++;
 4657 }
 4658 else
 4659 {
 4660 infrun_debug_printf (" %s not executing",
 4661 target_pid_to_str (t->ptid).c_str ());
 4662
 4663 /* The thread may be not executing, but still be
 4664 resumed with a pending status to process. */
 4665 t->resumed = false;
 4666 }
 4667 }
 4668
 4669 if (waits_needed == 0)
 4670 break;
 4671
 4672 /* If we find new threads on the second iteration, restart
 4673 over. We want to see two iterations in a row with all
 4674 threads stopped. */
 4675 if (pass > 0)
 4676 pass = -1;
 4677
 4678 for (int i = 0; i < waits_needed; i++)
 4679 {
 4680 wait_one_event event = wait_one ();
 4681
 4682 infrun_debug_printf
 4683 ("%s %s", target_waitstatus_to_string (&event.ws).c_str (),
 4684 target_pid_to_str (event.ptid).c_str ());
 4685
 4686 if (event.ws.kind == TARGET_WAITKIND_NO_RESUMED)
 4687 {
 4688 /* All resumed threads exited. */
 4689 break;
 4690 }
 4691 else if (event.ws.kind == TARGET_WAITKIND_THREAD_EXITED
 4692 || event.ws.kind == TARGET_WAITKIND_EXITED
 4693 || event.ws.kind == TARGET_WAITKIND_SIGNALLED)
 4694 {
 4695 /* One thread/process exited/signalled. */
 4696
 4697 thread_info *t = nullptr;
 4698
 4699 /* The target may have reported just a pid. If so, try
 4700 the first non-exited thread. */
 4701 if (event.ptid.is_pid ())
 4702 {
 4703 int pid = event.ptid.pid ();
 4704 inferior *inf = find_inferior_pid (event.target, pid);
 4705 for (thread_info *tp : inf->non_exited_threads ())
 4706 {
 4707 t = tp;
 4708 break;
 4709 }
 4710
 4711 /* If there is no available thread, the event would
 4712 have to be appended to a per-inferior event list,
 4713 which does not exist (and if it did, we'd have
 4714 to adjust run control command to be able to
 4715 resume such an inferior). We assert here instead
 4716 of going into an infinite loop. */
 4717 gdb_assert (t != nullptr);
 4718
 4719 infrun_debug_printf
 4720 ("using %s", target_pid_to_str (t->ptid).c_str ());
 4721 }
 4722 else
 4723 {
 4724 t = find_thread_ptid (event.target, event.ptid);
 4725 /* Check if this is the first time we see this thread.
 4726 Don't bother adding if it individually exited. */
 4727 if (t == nullptr
 4728 && event.ws.kind != TARGET_WAITKIND_THREAD_EXITED)
 4729 t = add_thread (event.target, event.ptid);
 4730 }
 4731
 4732 if (t != nullptr)
 4733 {
 4734 /* Set the threads as non-executing to avoid
 4735 another stop attempt on them. */
 4736 switch_to_thread_no_regs (t);
 4737 mark_non_executing_threads (event.target, event.ptid,
 4738 event.ws);
 4739 save_waitstatus (t, &event.ws);
 4740 t->stop_requested = false;
 4741 }
 4742 }
 4743 else
 4744 {
 4745 thread_info *t = find_thread_ptid (event.target, event.ptid);
 4746 if (t == NULL)
 4747 t = add_thread (event.target, event.ptid);
 4748
 4749 t->stop_requested = 0;
 4750 t->executing = 0;
 4751 t->resumed = false;
 4752 t->control.may_range_step = 0;
 4753
 4754 /* This may be the first time we see the inferior report
 4755 a stop. */
 4756 inferior *inf = find_inferior_ptid (event.target, event.ptid);
 4757 if (inf->needs_setup)
 4758 {
 4759 switch_to_thread_no_regs (t);
 4760 setup_inferior (0);
 4761 }
 4762
 4763 if (event.ws.kind == TARGET_WAITKIND_STOPPED
 4764 && event.ws.value.sig == GDB_SIGNAL_0)
 4765 {
 4766 /* We caught the event that we intended to catch, so
 4767 there's no event pending. */
 4768 t->suspend.waitstatus.kind = TARGET_WAITKIND_IGNORE;
 4769 t->suspend.waitstatus_pending_p = 0;
 4770
 4771 if (displaced_step_finish (t, GDB_SIGNAL_0)
 4772 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
 4773 {
 4774 /* Add it back to the step-over queue. */
 4775 infrun_debug_printf
 4776 ("displaced-step of %s canceled: adding back to "
 4777 "the step-over queue",
 4778 target_pid_to_str (t->ptid).c_str ());
 4779
 4780 t->control.trap_expected = 0;
 4781 global_thread_step_over_chain_enqueue (t);
 4782 }
 4783 }
 4784 else
 4785 {
 4786 enum gdb_signal sig;
 4787 struct regcache *regcache;
 4788
 4789 infrun_debug_printf
 4790 ("target_wait %s, saving status for %d.%ld.%ld",
 4791 target_waitstatus_to_string (&event.ws).c_str (),
 4792 t->ptid.pid (), t->ptid.lwp (), t->ptid.tid ());
 4793
 4794 /* Record for later. */
 4795 save_waitstatus (t, &event.ws);
 4796
 4797 sig = (event.ws.kind == TARGET_WAITKIND_STOPPED
 4798 ? event.ws.value.sig : GDB_SIGNAL_0);
 4799
 4800 if (displaced_step_finish (t, sig)
 4801 == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
 4802 {
 4803 /* Add it back to the step-over queue. */
 4804 t->control.trap_expected = 0;
 4805 global_thread_step_over_chain_enqueue (t);
 4806 }
 4807
 4808 regcache = get_thread_regcache (t);
 4809 t->suspend.stop_pc = regcache_read_pc (regcache);
 4810
 4811 infrun_debug_printf ("saved stop_pc=%s for %s "
 4812 "(currently_stepping=%d)",
 4813 paddress (target_gdbarch (),
 4814 t->suspend.stop_pc),
 4815 target_pid_to_str (t->ptid).c_str (),
 4816 currently_stepping (t));
 4817 }
 4818 }
 4819 }
 4820 }
 4821 }
 4822}
4823
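/* Rough shape of stop_all_threads above (illustrative summary):

     repeat until two consecutive passes are "quiet":
       refresh the thread lists of all live targets;
       for each executing thread of a non-stop target, request a stop if
         one isn't already pending, and count one expected wait;
       if nothing needed stopping, the pass was quiet;
       otherwise call wait_one once per expected wait, and either record
         the event with save_waitstatus (so it can be reported later) or
         simply mark the thread as stopped.

   Discovering a new thread on the second pass resets the count
   (pass = -1), which is what enforces the two-quiet-passes rule stated
   in the comment before the loop.  */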
 4824/* Handle a TARGET_WAITKIND_NO_RESUMED event. */
 4825
 4826static bool
 4827handle_no_resumed (struct execution_control_state *ecs)
 4828{
 4829 if (target_can_async_p ())
 4830 {
 4831 bool any_sync = false;
 4832
 4833 for (ui *ui : all_uis ())
 4834 {
 4835 if (ui->prompt_state == PROMPT_BLOCKED)
 4836 {
 4837 any_sync = true;
 4838 break;
 4839 }
 4840 }
 4841 if (!any_sync)
 4842 {
 4843 /* There were no unwaited-for children left in the target, but,
 4844 we're not synchronously waiting for events either. Just
 4845 ignore. */
 4846
 4847 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
 4848 prepare_to_wait (ecs);
 4849 return true;
 4850 }
 4851 }
 4852
 4853 /* Otherwise, if we were running a synchronous execution command, we
 4854 may need to cancel it and give the user back the terminal.
 4855
 4856 In non-stop mode, the target can't tell whether we've already
 4857 consumed previous stop events, so it can end up sending us a
 4858 no-resumed event like so:
 4859
 4860 #0 - thread 1 is left stopped
 4861
 4862 #1 - thread 2 is resumed and hits breakpoint
 4863 -> TARGET_WAITKIND_STOPPED
 4864
 4865 #2 - thread 3 is resumed and exits
 4866 this is the last resumed thread, so
 4867 -> TARGET_WAITKIND_NO_RESUMED
 4868
 4869 #3 - gdb processes stop for thread 2 and decides to re-resume
 4870 it.
 4871
 4872 #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
 4873 thread 2 is now resumed, so the event should be ignored.
 4874
 4875 IOW, if the stop for thread 2 doesn't end a foreground command,
 4876 then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
 4877 event. But it could be that the event meant that thread 2 itself
 4878 (or whatever other thread was the last resumed thread) exited.
 4879
 4880 To address this we refresh the thread list and check whether we
 4881 have resumed threads _now_. In the example above, this removes
 4882 thread 3 from the thread list. If thread 2 was re-resumed, we
 4883 ignore this event. If we find no thread resumed, then we cancel
 4884 the synchronous command and show "no unwaited-for " to the
 4885 user. */
 4886
 4887 inferior *curr_inf = current_inferior ();
 4888
 4889 scoped_restore_current_thread restore_thread;
 4890
 4891 for (auto *target : all_non_exited_process_targets ())
 4892 {
 4893 switch_to_target_no_thread (target);
 4894 update_thread_list ();
 4895 }
 4896
 4897 /* If:
 4898
 4899 - the current target has no thread executing, and
 4900 - the current inferior is native, and
 4901 - the current inferior is the one which has the terminal, and
 4902 - we did nothing,
 4903
 4904 then a Ctrl-C from this point on would remain stuck in the
 4905 kernel, until a thread resumes and dequeues it. That would
 4906 result in the GDB CLI not reacting to Ctrl-C, not able to
 4907 interrupt the program. To address this, if the current inferior
 4908 no longer has any thread executing, we give the terminal to some
 4909 other inferior that has at least one thread executing. */
 4910 bool swap_terminal = true;
 4911
 4912 /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
 4913 whether to report it to the user. */
 4914 bool ignore_event = false;
 4915
 4916 for (thread_info *thread : all_non_exited_threads ())
 4917 {
 4918 if (swap_terminal && thread->executing)
 4919 {
 4920 if (thread->inf != curr_inf)
 4921 {
 4922 target_terminal::ours ();
 4923
 4924 switch_to_thread (thread);
 4925 target_terminal::inferior ();
 4926 }
 4927 swap_terminal = false;
 4928 }
 4929
 4930 if (!ignore_event
 4931 && (thread->executing
 4932 || thread->suspend.waitstatus_pending_p))
 4933 {
 4934 /* Either there were no unwaited-for children left in the
 4935 target at some point, but there are now, or some target
 4936 other than the eventing one has unwaited-for children
 4937 left. Just ignore. */
 4938 infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
 4939 "(ignoring: found resumed)");
 4940
 4941 ignore_event = true;
 4942 }
 4943
 4944 if (ignore_event && !swap_terminal)
 4945 break;
 4946 }
 4947
 4948 if (ignore_event)
 4949 {
 4950 switch_to_inferior_no_thread (curr_inf);
 4951 prepare_to_wait (ecs);
 4952 return true;
 4953 }
 4954
 4955 /* Go ahead and report the event. */
 4956 return false;
 4957}
4958
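/* Summary of the decision made by handle_no_resumed above
   (illustrative): the NO_RESUMED event is swallowed, and we keep
   waiting, if either no UI is synchronously blocked on the inferior,
   or, after refreshing every target's thread list, some thread is
   still executing or has a pending waitstatus.  Otherwise the event is
   reported, which the caller turns into the usual "no unwaited-for
   children" stop.  As a side effect, if the current inferior has
   nothing executing anymore, the terminal is handed to an inferior
   that does, so Ctrl-C keeps working.  */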
05ba8510
PA
4959/* Given an execution control state that has been freshly filled in by
4960 an event from the inferior, figure out what it means and take
4961 appropriate action.
4962
4963 The alternatives are:
4964
 4965 1) stop_waiting and return; to really stop and return to the
 4966 debugger.
4967
4968 2) keep_going and return; to wait for the next event (set
4969 ecs->event_thread->stepping_over_breakpoint to 1 to single step
4970 once). */
c906108c 4971
ec9499be 4972static void
595915c1 4973handle_inferior_event (struct execution_control_state *ecs)
cd0fc7c3 4974{
595915c1
TT
4975 /* Make sure that all temporary struct value objects that were
4976 created during the handling of the event get deleted at the
4977 end. */
4978 scoped_value_mark free_values;
4979
d6b48e9c
PA
4980 enum stop_kind stop_soon;
4981
1eb8556f 4982 infrun_debug_printf ("%s", target_waitstatus_to_string (&ecs->ws).c_str ());
c29705b7 4983
28736962
PA
4984 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
4985 {
4986 /* We had an event in the inferior, but we are not interested in
4987 handling it at this level. The lower layers have already
4988 done what needs to be done, if anything.
4989
4990 One of the possible circumstances for this is when the
4991 inferior produces output for the console. The inferior has
4992 not stopped, and we are ignoring the event. Another possible
4993 circumstance is any event which the lower level knows will be
4994 reported multiple times without an intervening resume. */
28736962
PA
4995 prepare_to_wait (ecs);
4996 return;
4997 }
4998
65706a29
PA
4999 if (ecs->ws.kind == TARGET_WAITKIND_THREAD_EXITED)
5000 {
65706a29
PA
5001 prepare_to_wait (ecs);
5002 return;
5003 }
5004
 5005 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
 5006 && handle_no_resumed (ecs))
5007 return;
0e5bf2a8 5008
5b6d1e4f
PA
5009 /* Cache the last target/ptid/waitstatus. */
5010 set_last_target_status (ecs->target, ecs->ptid, ecs->ws);
e02bc4cc 5011
ca005067 5012 /* Always clear state belonging to the previous time we stopped. */
aa7d318d 5013 stop_stack_dummy = STOP_NONE;
ca005067 5014
0e5bf2a8
PA
5015 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
5016 {
5017 /* No unwaited-for children left. IOW, all resumed children
5018 have exited. */
c4464ade 5019 stop_print_frame = false;
22bcd14b 5020 stop_waiting (ecs);
0e5bf2a8
PA
5021 return;
5022 }
5023
8c90c137 5024 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
64776a0b 5025 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
359f5fe6 5026 {
5b6d1e4f 5027 ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
359f5fe6
PA
5028 /* If it's a new thread, add it to the thread database. */
5029 if (ecs->event_thread == NULL)
5b6d1e4f 5030 ecs->event_thread = add_thread (ecs->target, ecs->ptid);
c1e36e3e
PA
5031
5032 /* Disable range stepping. If the next step request could use a
 5033 range, this will end up re-enabled then. */
5034 ecs->event_thread->control.may_range_step = 0;
359f5fe6 5035 }
88ed393a
JK
5036
5037 /* Dependent on valid ECS->EVENT_THREAD. */
d8dd4d5f 5038 adjust_pc_after_break (ecs->event_thread, &ecs->ws);
88ed393a
JK
5039
5040 /* Dependent on the current PC value modified by adjust_pc_after_break. */
5041 reinit_frame_cache ();
5042
28736962
PA
5043 breakpoint_retire_moribund ();
5044
2b009048
DJ
5045 /* First, distinguish signals caused by the debugger from signals
5046 that have to do with the program's own actions. Note that
5047 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
5048 on the operating system version. Here we detect when a SIGILL or
5049 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
5050 something similar for SIGSEGV, since a SIGSEGV will be generated
5051 when we're trying to execute a breakpoint instruction on a
5052 non-executable stack. This happens for call dummy breakpoints
5053 for architectures like SPARC that place call dummies on the
5054 stack. */
 5055 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
 5056 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
 5057 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
 5058 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
2b009048 5059 {
00431a78 5060 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
de0a0249 5061
 5062 if (breakpoint_inserted_here_p (regcache->aspace (),
 5063 regcache_read_pc (regcache)))
5064 {
1eb8556f 5065 infrun_debug_printf ("Treating signal as SIGTRAP");
a493e3e2 5066 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
de0a0249 5067 }
2b009048
DJ
5068 }
5069
293b3ebc 5070 mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);
8c90c137 5071
488f131b
JB
5072 switch (ecs->ws.kind)
5073 {
5074 case TARGET_WAITKIND_LOADED:
00431a78 5075 context_switch (ecs);
 5076 /* Ignore gracefully during startup of the inferior, as it might
 5077 be the shell which has just loaded some objects, otherwise
5078 add the symbols for the newly loaded objects. Also ignore at
5079 the beginning of an attach or remote session; we will query
5080 the full list of libraries once the connection is
5081 established. */
4f5d7f63 5082
00431a78 5083 stop_soon = get_inferior_stop_soon (ecs);
c0236d92 5084 if (stop_soon == NO_STOP_QUIETLY)
488f131b 5085 {
edcc5120
TT
5086 struct regcache *regcache;
5087
00431a78 5088 regcache = get_thread_regcache (ecs->event_thread);
edcc5120
TT
5089
5090 handle_solib_event ();
5091
5092 ecs->event_thread->control.stop_bpstat
 5093 = bpstat_stop_status (regcache->aspace (),
 5094 ecs->event_thread->suspend.stop_pc,
5095 ecs->event_thread, &ecs->ws);
ab04a2af 5096
c65d6b55
PA
5097 if (handle_stop_requested (ecs))
5098 return;
5099
ce12b012 5100 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
edcc5120
TT
5101 {
5102 /* A catchpoint triggered. */
94c57d6a
PA
5103 process_event_stop_test (ecs);
5104 return;
edcc5120 5105 }
488f131b 5106
b0f4b84b
DJ
5107 /* If requested, stop when the dynamic linker notifies
5108 gdb of events. This allows the user to get control
5109 and place breakpoints in initializer routines for
5110 dynamically loaded objects (among other things). */
a493e3e2 5111 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
b0f4b84b
DJ
5112 if (stop_on_solib_events)
5113 {
55409f9d
DJ
5114 /* Make sure we print "Stopped due to solib-event" in
5115 normal_stop. */
c4464ade 5116 stop_print_frame = true;
55409f9d 5117
22bcd14b 5118 stop_waiting (ecs);
b0f4b84b
DJ
5119 return;
5120 }
488f131b 5121 }
b0f4b84b
DJ
5122
5123 /* If we are skipping through a shell, or through shared library
5124 loading that we aren't interested in, resume the program. If
5c09a2c5 5125 we're running the program normally, also resume. */
b0f4b84b
DJ
5126 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
5127 {
74960c60
VP
5128 /* Loading of shared libraries might have changed breakpoint
5129 addresses. Make sure new breakpoints are inserted. */
a25a5a45 5130 if (stop_soon == NO_STOP_QUIETLY)
74960c60 5131 insert_breakpoints ();
64ce06e4 5132 resume (GDB_SIGNAL_0);
b0f4b84b
DJ
5133 prepare_to_wait (ecs);
5134 return;
5135 }
5136
5c09a2c5
PA
5137 /* But stop if we're attaching or setting up a remote
5138 connection. */
5139 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5140 || stop_soon == STOP_QUIETLY_REMOTE)
5141 {
1eb8556f 5142 infrun_debug_printf ("quietly stopped");
22bcd14b 5143 stop_waiting (ecs);
5c09a2c5
PA
5144 return;
5145 }
5146
5147 internal_error (__FILE__, __LINE__,
5148 _("unhandled stop_soon: %d"), (int) stop_soon);
c5aa993b 5149
488f131b 5150 case TARGET_WAITKIND_SPURIOUS:
c65d6b55
PA
5151 if (handle_stop_requested (ecs))
5152 return;
00431a78 5153 context_switch (ecs);
64ce06e4 5154 resume (GDB_SIGNAL_0);
488f131b
JB
5155 prepare_to_wait (ecs);
5156 return;
c5aa993b 5157
65706a29 5158 case TARGET_WAITKIND_THREAD_CREATED:
c65d6b55
PA
5159 if (handle_stop_requested (ecs))
5160 return;
00431a78 5161 context_switch (ecs);
65706a29
PA
5162 if (!switch_back_to_stepped_thread (ecs))
5163 keep_going (ecs);
5164 return;
5165
488f131b 5166 case TARGET_WAITKIND_EXITED:
940c3c06 5167 case TARGET_WAITKIND_SIGNALLED:
18493a00
PA
5168 {
5169 /* Depending on the system, ecs->ptid may point to a thread or
5170 to a process. On some targets, target_mourn_inferior may
5171 need to have access to the just-exited thread. That is the
5172 case of GNU/Linux's "checkpoint" support, for example.
5173 Call the switch_to_xxx routine as appropriate. */
5174 thread_info *thr = find_thread_ptid (ecs->target, ecs->ptid);
5175 if (thr != nullptr)
5176 switch_to_thread (thr);
5177 else
5178 {
5179 inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5180 switch_to_inferior_no_thread (inf);
5181 }
5182 }
6c95b8df 5183 handle_vfork_child_exec_or_exit (0);
223ffa71 5184 target_terminal::ours (); /* Must do this before mourn anyway. */
488f131b 5185
0c557179
SDJ
5186 /* Clearing any previous state of convenience variables. */
5187 clear_exit_convenience_vars ();
5188
940c3c06
PA
5189 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
5190 {
5191 /* Record the exit code in the convenience variable $_exitcode, so
5192 that the user can inspect this again later. */
5193 set_internalvar_integer (lookup_internalvar ("_exitcode"),
5194 (LONGEST) ecs->ws.value.integer);
5195
5196 /* Also record this in the inferior itself. */
5197 current_inferior ()->has_exit_code = 1;
5198 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
8cf64490 5199
98eb56a4
PA
5200 /* Support the --return-child-result option. */
5201 return_child_result_value = ecs->ws.value.integer;
5202
76727919 5203 gdb::observers::exited.notify (ecs->ws.value.integer);
940c3c06
PA
5204 }
5205 else
0c557179 5206 {
00431a78 5207 struct gdbarch *gdbarch = current_inferior ()->gdbarch;
0c557179
SDJ
5208
5209 if (gdbarch_gdb_signal_to_target_p (gdbarch))
5210 {
5211 /* Set the value of the internal variable $_exitsignal,
5212 which holds the signal uncaught by the inferior. */
5213 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
5214 gdbarch_gdb_signal_to_target (gdbarch,
5215 ecs->ws.value.sig));
5216 }
5217 else
5218 {
5219 /* We don't have access to the target's method used for
5220 converting between signal numbers (GDB's internal
5221 representation <-> target's representation).
5222 Therefore, we cannot do a good job at displaying this
5223 information to the user. It's better to just warn
5224 her about it (if infrun debugging is enabled), and
5225 give up. */
1eb8556f
SM
5226 infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
5227 "signal number.");
0c557179
SDJ
5228 }
5229
76727919 5230 gdb::observers::signal_exited.notify (ecs->ws.value.sig);
0c557179 5231 }
8cf64490 5232
488f131b 5233 gdb_flush (gdb_stdout);
bc1e6c81 5234 target_mourn_inferior (inferior_ptid);
c4464ade 5235 stop_print_frame = false;
22bcd14b 5236 stop_waiting (ecs);
488f131b 5237 return;
c5aa993b 5238
488f131b 5239 case TARGET_WAITKIND_FORKED:
deb3b17b 5240 case TARGET_WAITKIND_VFORKED:
e2d96639
YQ
5241 /* Check whether the inferior is displaced stepping. */
5242 {
00431a78 5243 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
ac7936df 5244 struct gdbarch *gdbarch = regcache->arch ();
c0aba012 5245 inferior *parent_inf = find_inferior_ptid (ecs->target, ecs->ptid);
e2d96639 5246
187b041e
SM
5247 /* If this is a fork (child gets its own address space copy) and some
5248 displaced step buffers were in use at the time of the fork, restore
5249 the displaced step buffer bytes in the child process. */
 5250 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
 5251 gdbarch_displaced_step_restore_all_in_ptid
 5252 (gdbarch, parent_inf, ecs->ws.value.related_pid);
c0aba012
SM
5253
5254 /* If displaced stepping is supported, and thread ecs->ptid is
5255 displaced stepping. */
00431a78 5256 if (displaced_step_in_progress_thread (ecs->event_thread))
e2d96639 5257 {
e2d96639
YQ
5258 struct regcache *child_regcache;
5259 CORE_ADDR parent_pc;
5260
5261 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
5262 indicating that the displaced stepping of syscall instruction
5263 has been done. Perform cleanup for parent process here. Note
5264 that this operation also cleans up the child process for vfork,
5265 because their pages are shared. */
7def77a1 5266 displaced_step_finish (ecs->event_thread, GDB_SIGNAL_TRAP);
c2829269
PA
5267 /* Start a new step-over in another thread if there's one
5268 that needs it. */
5269 start_step_over ();
e2d96639 5270
e2d96639
YQ
5271 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
5272 the child's PC is also within the scratchpad. Set the child's PC
5273 to the parent's PC value, which has already been fixed up.
5274 FIXME: we use the parent's aspace here, although we're touching
5275 the child, because the child hasn't been added to the inferior
5276 list yet at this point. */
5277
5278 child_regcache
5b6d1e4f
PA
5279 = get_thread_arch_aspace_regcache (parent_inf->process_target (),
5280 ecs->ws.value.related_pid,
e2d96639
YQ
5281 gdbarch,
5282 parent_inf->aspace);
5283 /* Read PC value of parent process. */
5284 parent_pc = regcache_read_pc (regcache);
5285
136821d9
SM
5286 displaced_debug_printf ("write child pc from %s to %s",
5287 paddress (gdbarch,
5288 regcache_read_pc (child_regcache)),
5289 paddress (gdbarch, parent_pc));
e2d96639
YQ
5290
5291 regcache_write_pc (child_regcache, parent_pc);
5292 }
5293 }
5294
00431a78 5295 context_switch (ecs);
5a2901d9 5296
b242c3c2
PA
5297 /* Immediately detach breakpoints from the child before there's
5298 any chance of letting the user delete breakpoints from the
5299 breakpoint lists. If we don't do this early, it's easy to
5300 leave left over traps in the child, vis: "break foo; catch
5301 fork; c; <fork>; del; c; <child calls foo>". We only follow
5302 the fork on the last `continue', and by that time the
5303 breakpoint at "foo" is long gone from the breakpoint table.
5304 If we vforked, then we don't need to unpatch here, since both
5305 parent and child are sharing the same memory pages; we'll
5306 need to unpatch at follow/detach time instead to be certain
5307 that new breakpoints added between catchpoint hit time and
5308 vfork follow are detached. */
5309 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
5310 {
b242c3c2
PA
5311 /* This won't actually modify the breakpoint list, but will
5312 physically remove the breakpoints from the child. */
d80ee84f 5313 detach_breakpoints (ecs->ws.value.related_pid);
b242c3c2
PA
5314 }
5315
34b7e8a6 5316 delete_just_stopped_threads_single_step_breakpoints ();
d03285ec 5317
e58b0e63
PA
5318 /* In case the event is caught by a catchpoint, remember that
5319 the event is to be followed at the next resume of the thread,
5320 and not immediately. */
5321 ecs->event_thread->pending_follow = ecs->ws;
5322
f2ffa92b
PA
5323 ecs->event_thread->suspend.stop_pc
5324 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
675bf4cb 5325
16c381f0 5326 ecs->event_thread->control.stop_bpstat
a01bda52 5327 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5328 ecs->event_thread->suspend.stop_pc,
5329 ecs->event_thread, &ecs->ws);
675bf4cb 5330
c65d6b55
PA
5331 if (handle_stop_requested (ecs))
5332 return;
5333
ce12b012
PA
5334 /* If no catchpoint triggered for this, then keep going. Note
5335 that we're interested in knowing the bpstat actually causes a
5336 stop, not just if it may explain the signal. Software
5337 watchpoints, for example, always appear in the bpstat. */
5338 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5339 {
5ab2fbf1 5340 bool follow_child
3e43a32a 5341 = (follow_fork_mode_string == follow_fork_mode_child);
e58b0e63 5342
a493e3e2 5343 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
e58b0e63 5344
5b6d1e4f
PA
5345 process_stratum_target *targ
5346 = ecs->event_thread->inf->process_target ();
5347
5ab2fbf1 5348 bool should_resume = follow_fork ();
e58b0e63 5349
5b6d1e4f
PA
5350 /* Note that one of these may be an invalid pointer,
5351 depending on detach_fork. */
00431a78 5352 thread_info *parent = ecs->event_thread;
5b6d1e4f
PA
5353 thread_info *child
5354 = find_thread_ptid (targ, ecs->ws.value.related_pid);
6c95b8df 5355
a2077e25
PA
5356 /* At this point, the parent is marked running, and the
5357 child is marked stopped. */
5358
5359 /* If not resuming the parent, mark it stopped. */
5360 if (follow_child && !detach_fork && !non_stop && !sched_multi)
00431a78 5361 parent->set_running (false);
a2077e25
PA
5362
5363 /* If resuming the child, mark it running. */
5364 if (follow_child || (!detach_fork && (non_stop || sched_multi)))
00431a78 5365 child->set_running (true);
a2077e25 5366
6c95b8df 5367 /* In non-stop mode, also resume the other branch. */
fbea99ea
PA
5368 if (!detach_fork && (non_stop
5369 || (sched_multi && target_is_non_stop_p ())))
6c95b8df
PA
5370 {
5371 if (follow_child)
5372 switch_to_thread (parent);
5373 else
5374 switch_to_thread (child);
5375
5376 ecs->event_thread = inferior_thread ();
5377 ecs->ptid = inferior_ptid;
5378 keep_going (ecs);
5379 }
5380
5381 if (follow_child)
5382 switch_to_thread (child);
5383 else
5384 switch_to_thread (parent);
5385
e58b0e63
PA
5386 ecs->event_thread = inferior_thread ();
5387 ecs->ptid = inferior_ptid;
5388
5389 if (should_resume)
5390 keep_going (ecs);
5391 else
22bcd14b 5392 stop_waiting (ecs);
04e68871
DJ
5393 return;
5394 }
94c57d6a
PA
5395 process_event_stop_test (ecs);
5396 return;
488f131b 5397
6c95b8df
PA
5398 case TARGET_WAITKIND_VFORK_DONE:
5399 /* Done with the shared memory region. Re-insert breakpoints in
5400 the parent, and keep going. */
5401
00431a78 5402 context_switch (ecs);
6c95b8df
PA
5403
5404 current_inferior ()->waiting_for_vfork_done = 0;
56710373 5405 current_inferior ()->pspace->breakpoints_not_allowed = 0;
c65d6b55
PA
5406
5407 if (handle_stop_requested (ecs))
5408 return;
5409
6c95b8df
PA
5410 /* This also takes care of reinserting breakpoints in the
5411 previously locked inferior. */
5412 keep_going (ecs);
5413 return;
5414
488f131b 5415 case TARGET_WAITKIND_EXECD:
488f131b 5416
cbd2b4e3
PA
5417 /* Note we can't read registers yet (the stop_pc), because we
5418 don't yet know the inferior's post-exec architecture.
5419 'stop_pc' is explicitly read below instead. */
00431a78 5420 switch_to_thread_no_regs (ecs->event_thread);
5a2901d9 5421
6c95b8df
PA
5422 /* Do whatever is necessary to the parent branch of the vfork. */
5423 handle_vfork_child_exec_or_exit (1);
5424
795e548f 5425 /* This causes the eventpoints and symbol table to be reset.
dda83cd7
SM
5426 Must do this now, before trying to determine whether to
5427 stop. */
71b43ef8 5428 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
795e548f 5429
17d8546e
DB
5430 /* In follow_exec we may have deleted the original thread and
5431 created a new one. Make sure that the event thread is the
5432 execd thread for that case (this is a nop otherwise). */
5433 ecs->event_thread = inferior_thread ();
5434
f2ffa92b
PA
5435 ecs->event_thread->suspend.stop_pc
5436 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
ecdc3a72 5437
16c381f0 5438 ecs->event_thread->control.stop_bpstat
a01bda52 5439 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5440 ecs->event_thread->suspend.stop_pc,
5441 ecs->event_thread, &ecs->ws);
795e548f 5442
71b43ef8
PA
5443 /* Note that this may be referenced from inside
5444 bpstat_stop_status above, through inferior_has_execd. */
5445 xfree (ecs->ws.value.execd_pathname);
5446 ecs->ws.value.execd_pathname = NULL;
5447
c65d6b55
PA
5448 if (handle_stop_requested (ecs))
5449 return;
5450
04e68871 5451 /* If no catchpoint triggered for this, then keep going. */
ce12b012 5452 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
04e68871 5453 {
a493e3e2 5454 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
04e68871
DJ
5455 keep_going (ecs);
5456 return;
5457 }
94c57d6a
PA
5458 process_event_stop_test (ecs);
5459 return;
488f131b 5460
b4dc5ffa 5461 /* Be careful not to try to gather much state about a thread
dda83cd7 5462 that's in a syscall. It's frequently a losing proposition. */
488f131b 5463 case TARGET_WAITKIND_SYSCALL_ENTRY:
1777feb0 5464 /* Getting the current syscall number. */
94c57d6a
PA
5465 if (handle_syscall_event (ecs) == 0)
5466 process_event_stop_test (ecs);
5467 return;
c906108c 5468
 5469 /* Before examining the threads further, step this thread to
 5470 get it entirely out of the syscall. (We get notice of the
5471 event when the thread is just on the verge of exiting a
5472 syscall. Stepping one instruction seems to get it back
5473 into user code.) */
488f131b 5474 case TARGET_WAITKIND_SYSCALL_RETURN:
94c57d6a
PA
5475 if (handle_syscall_event (ecs) == 0)
5476 process_event_stop_test (ecs);
5477 return;
c906108c 5478
488f131b 5479 case TARGET_WAITKIND_STOPPED:
4f5d7f63
PA
5480 handle_signal_stop (ecs);
5481 return;
c906108c 5482
b2175913
MS
5483 case TARGET_WAITKIND_NO_HISTORY:
5484 /* Reverse execution: target ran out of history info. */
eab402df 5485
d1988021 5486 /* Switch to the stopped thread. */
00431a78 5487 context_switch (ecs);
1eb8556f 5488 infrun_debug_printf ("stopped");
d1988021 5489
34b7e8a6 5490 delete_just_stopped_threads_single_step_breakpoints ();
f2ffa92b
PA
5491 ecs->event_thread->suspend.stop_pc
5492 = regcache_read_pc (get_thread_regcache (inferior_thread ()));
c65d6b55
PA
5493
5494 if (handle_stop_requested (ecs))
5495 return;
5496
76727919 5497 gdb::observers::no_history.notify ();
22bcd14b 5498 stop_waiting (ecs);
b2175913 5499 return;
488f131b 5500 }
4f5d7f63
PA
5501}
5502
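/* Orientation note (illustrative): handle_inferior_event above is the
   central dispatcher.  It first filters events that never reach the
   user (IGNORE, THREAD_EXITED, some NO_RESUMED cases), caches the last
   target/ptid/waitstatus, converts breakpoint-induced SIGILL/SIGSEGV/
   SIGEMT into SIGTRAP, and then switches on ecs->ws.kind: shared
   library loads, fork/vfork/exec, syscall entry/return, plain stops
   (handle_signal_stop), exits, and reverse-execution "no history".
   Every arm ends in keep_going, prepare_to_wait, stop_waiting or
   process_event_stop_test, matching the contract described before the
   function.  */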
5503/* Restart threads back to what they were trying to do back when we
5504 paused them for an in-line step-over. The EVENT_THREAD thread is
5505 ignored. */
4d9d9d04
PA
5506
5507static void
372316f1
PA
5508restart_threads (struct thread_info *event_thread)
5509{
372316f1
PA
5510 /* In case the instruction just stepped spawned a new thread. */
5511 update_thread_list ();
5512
08036331 5513 for (thread_info *tp : all_non_exited_threads ())
372316f1 5514 {
f3f8ece4
PA
5515 switch_to_thread_no_regs (tp);
5516
372316f1
PA
5517 if (tp == event_thread)
5518 {
1eb8556f
SM
5519 infrun_debug_printf ("restart threads: [%s] is event thread",
5520 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5521 continue;
5522 }
5523
5524 if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
5525 {
1eb8556f
SM
5526 infrun_debug_printf ("restart threads: [%s] not meant to be running",
5527 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5528 continue;
5529 }
5530
5531 if (tp->resumed)
5532 {
1eb8556f
SM
5533 infrun_debug_printf ("restart threads: [%s] resumed",
5534 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5535 gdb_assert (tp->executing || tp->suspend.waitstatus_pending_p);
5536 continue;
5537 }
5538
5539 if (thread_is_in_step_over_chain (tp))
5540 {
1eb8556f
SM
5541 infrun_debug_printf ("restart threads: [%s] needs step-over",
5542 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5543 gdb_assert (!tp->resumed);
5544 continue;
5545 }
5546
5547
5548 if (tp->suspend.waitstatus_pending_p)
5549 {
1eb8556f
SM
5550 infrun_debug_printf ("restart threads: [%s] has pending status",
5551 target_pid_to_str (tp->ptid).c_str ());
719546c4 5552 tp->resumed = true;
372316f1
PA
5553 continue;
5554 }
5555
c65d6b55
PA
5556 gdb_assert (!tp->stop_requested);
5557
372316f1
PA
5558 /* If some thread needs to start a step-over at this point, it
5559 should still be in the step-over queue, and thus skipped
5560 above. */
5561 if (thread_still_needs_step_over (tp))
5562 {
5563 internal_error (__FILE__, __LINE__,
5564 "thread [%s] needs a step-over, but not in "
5565 "step-over queue\n",
a068643d 5566 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5567 }
5568
5569 if (currently_stepping (tp))
5570 {
1eb8556f
SM
5571 infrun_debug_printf ("restart threads: [%s] was stepping",
5572 target_pid_to_str (tp->ptid).c_str ());
372316f1
PA
5573 keep_going_stepped_thread (tp);
5574 }
5575 else
5576 {
5577 struct execution_control_state ecss;
5578 struct execution_control_state *ecs = &ecss;
5579
1eb8556f
SM
5580 infrun_debug_printf ("restart threads: [%s] continuing",
5581 target_pid_to_str (tp->ptid).c_str ());
372316f1 5582 reset_ecs (ecs, tp);
00431a78 5583 switch_to_thread (tp);
372316f1
PA
5584 keep_going_pass_signal (ecs);
5585 }
5586 }
5587}
5588
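/* Illustrative summary: restart_threads above revisits every non-exited
   thread except EVENT_THREAD.  Threads that are not supposed to run,
   are already resumed, still need their own step-over, or have a
   pending waitstatus are left alone (the pending ones are only marked
   resumed so that do_target_wait will pick their saved event up later);
   the remaining threads are re-stepped or continued via
   keep_going_stepped_thread / keep_going_pass_signal.  */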
5589/* Callback for iterate_over_threads. Find a resumed thread that has
5590 a pending waitstatus. */
5591
5592static int
5593resumed_thread_with_pending_status (struct thread_info *tp,
5594 void *arg)
5595{
5596 return (tp->resumed
5597 && tp->suspend.waitstatus_pending_p);
5598}
5599
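/* Illustrative use, as finish_step_over below does it:

     thread_info *pending
       = iterate_over_threads (resumed_thread_with_pending_status, NULL);

   A NULL result means no resumed thread has an event waiting to be
   processed.  */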
5600/* Called when we get an event that may finish an in-line or
5601 out-of-line (displaced stepping) step-over started previously.
5602 Return true if the event is processed and we should go back to the
5603 event loop; false if the caller should continue processing the
5604 event. */
5605
5606static int
4d9d9d04
PA
5607finish_step_over (struct execution_control_state *ecs)
5608{
7def77a1
SM
5609 displaced_step_finish (ecs->event_thread,
5610 ecs->event_thread->suspend.stop_signal);
4d9d9d04 5611
c4464ade 5612 bool had_step_over_info = step_over_info_valid_p ();
372316f1
PA
5613
5614 if (had_step_over_info)
4d9d9d04
PA
5615 {
5616 /* If we're stepping over a breakpoint with all threads locked,
5617 then only the thread that was stepped should be reporting
5618 back an event. */
5619 gdb_assert (ecs->event_thread->control.trap_expected);
5620
c65d6b55 5621 clear_step_over_info ();
4d9d9d04
PA
5622 }
5623
fbea99ea 5624 if (!target_is_non_stop_p ())
372316f1 5625 return 0;
4d9d9d04
PA
5626
5627 /* Start a new step-over in another thread if there's one that
5628 needs it. */
5629 start_step_over ();
372316f1
PA
5630
5631 /* If we were stepping over a breakpoint before, and haven't started
5632 a new in-line step-over sequence, then restart all other threads
5633 (except the event thread). We can't do this in all-stop, as then
5634 e.g., we wouldn't be able to issue any other remote packet until
5635 these other threads stop. */
5636 if (had_step_over_info && !step_over_info_valid_p ())
5637 {
5638 struct thread_info *pending;
5639
5640 /* If we only have threads with pending statuses, the restart
5641 below won't restart any thread and so nothing re-inserts the
5642 breakpoint we just stepped over. But we need it inserted
5643 when we later process the pending events, otherwise if
5644 another thread has a pending event for this breakpoint too,
5645 we'd discard its event (because the breakpoint that
5646 originally caused the event was no longer inserted). */
00431a78 5647 context_switch (ecs);
372316f1
PA
5648 insert_breakpoints ();
5649
5650 restart_threads (ecs->event_thread);
5651
5652 /* If we have events pending, go through handle_inferior_event
5653 again, picking up a pending event at random. This avoids
5654 thread starvation. */
5655
5656 /* But not if we just stepped over a watchpoint in order to let
5657 the instruction execute so we can evaluate its expression.
5658 The set of watchpoints that triggered is recorded in the
5659 breakpoint objects themselves (see bp->watchpoint_triggered).
5660 If we processed another event first, that other event could
5661 clobber this info. */
5662 if (ecs->event_thread->stepping_over_watchpoint)
5663 return 0;
5664
5665 pending = iterate_over_threads (resumed_thread_with_pending_status,
5666 NULL);
5667 if (pending != NULL)
5668 {
5669 struct thread_info *tp = ecs->event_thread;
5670 struct regcache *regcache;
5671
1eb8556f
SM
5672 infrun_debug_printf ("found resumed threads with "
5673 "pending events, saving status");
372316f1
PA
5674
5675 gdb_assert (pending != tp);
5676
5677 /* Record the event thread's event for later. */
5678 save_waitstatus (tp, &ecs->ws);
5679 /* This was cleared early, by handle_inferior_event. Set it
5680 so this pending event is considered by
5681 do_target_wait. */
719546c4 5682 tp->resumed = true;
372316f1
PA
5683
5684 gdb_assert (!tp->executing);
5685
00431a78 5686 regcache = get_thread_regcache (tp);
372316f1
PA
5687 tp->suspend.stop_pc = regcache_read_pc (regcache);
5688
1eb8556f
SM
5689 infrun_debug_printf ("saved stop_pc=%s for %s "
5690 "(currently_stepping=%d)",
5691 paddress (target_gdbarch (),
dda83cd7 5692 tp->suspend.stop_pc),
1eb8556f
SM
5693 target_pid_to_str (tp->ptid).c_str (),
5694 currently_stepping (tp));
372316f1
PA
5695
5696 /* This in-line step-over finished; clear this so we won't
5697 start a new one. This is what handle_signal_stop would
5698 do, if we returned false. */
5699 tp->stepping_over_breakpoint = 0;
5700
5701 /* Wake up the event loop again. */
5702 mark_async_event_handler (infrun_async_inferior_event_token);
5703
5704 prepare_to_wait (ecs);
5705 return 1;
5706 }
5707 }
5708
5709 return 0;
4d9d9d04
PA
5710}
5711
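/* Illustrative summary: finish_step_over above lets displaced stepping
   clean up after itself, and then, on non-stop targets only, kicks off
   the next queued step-over and restarts the threads that were paused
   for an in-line step-over.  It returns nonzero only when it parked a
   saved event and called prepare_to_wait itself, in which case its
   caller (handle_signal_stop, below) returns early.  */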
5712/* Come here when the program has stopped with a signal. */
5713
5714static void
5715handle_signal_stop (struct execution_control_state *ecs)
5716{
5717 struct frame_info *frame;
5718 struct gdbarch *gdbarch;
5719 int stopped_by_watchpoint;
5720 enum stop_kind stop_soon;
5721 int random_signal;
c906108c 5722
f0407826
DE
5723 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
5724
c65d6b55
PA
5725 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
5726
f0407826
DE
5727 /* Do we need to clean up the state of a thread that has
5728 completed a displaced single-step? (Doing so usually affects
5729 the PC, so do it here, before we set stop_pc.) */
372316f1
PA
5730 if (finish_step_over (ecs))
5731 return;
f0407826
DE
5732
5733 /* If we either finished a single-step or hit a breakpoint, but
5734 the user wanted this thread to be stopped, pretend we got a
5735 SIG0 (generic unsignaled stop). */
5736 if (ecs->event_thread->stop_requested
5737 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
5738 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
237fc4c9 5739
f2ffa92b
PA
5740 ecs->event_thread->suspend.stop_pc
5741 = regcache_read_pc (get_thread_regcache (ecs->event_thread));
488f131b 5742
527159b7 5743 if (debug_infrun)
237fc4c9 5744 {
00431a78 5745 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
b926417a 5746 struct gdbarch *reg_gdbarch = regcache->arch ();
7f82dfc7 5747
f3f8ece4 5748 switch_to_thread (ecs->event_thread);
5af949e3 5749
1eb8556f
SM
5750 infrun_debug_printf ("stop_pc=%s",
5751 paddress (reg_gdbarch,
5752 ecs->event_thread->suspend.stop_pc));
d92524f1 5753 if (target_stopped_by_watchpoint ())
237fc4c9 5754 {
dda83cd7 5755 CORE_ADDR addr;
abbb1732 5756
1eb8556f 5757 infrun_debug_printf ("stopped by watchpoint");
237fc4c9 5758
8b88a78e 5759 if (target_stopped_data_address (current_top_target (), &addr))
 5760 infrun_debug_printf ("stopped data address=%s",
 5761 paddress (reg_gdbarch, addr));
5762 else
1eb8556f 5763 infrun_debug_printf ("(no data address available)");
237fc4c9
PA
5764 }
5765 }
527159b7 5766
36fa8042
PA
 5767 /* This originates from start_remote(), start_inferior() and
 5768 the shared library hook functions. */
00431a78 5769 stop_soon = get_inferior_stop_soon (ecs);
36fa8042
PA
5770 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
5771 {
00431a78 5772 context_switch (ecs);
1eb8556f 5773 infrun_debug_printf ("quietly stopped");
c4464ade 5774 stop_print_frame = true;
22bcd14b 5775 stop_waiting (ecs);
36fa8042
PA
5776 return;
5777 }
5778
36fa8042
PA
5779 /* This originates from attach_command(). We need to overwrite
5780 the stop_signal here, because some kernels don't ignore a
5781 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
5782 See more comments in inferior.h. On the other hand, if we
5783 get a non-SIGSTOP, report it to the user - assume the backend
5784 will handle the SIGSTOP if it should show up later.
5785
5786 Also consider that the attach is complete when we see a
5787 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
5788 target extended-remote report it instead of a SIGSTOP
5789 (e.g. gdbserver). We already rely on SIGTRAP being our
5790 signal, so this is no exception.
5791
5792 Also consider that the attach is complete when we see a
5793 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
5794 the target to stop all threads of the inferior, in case the
5795 low level attach operation doesn't stop them implicitly. If
5796 they weren't stopped implicitly, then the stub will report a
5797 GDB_SIGNAL_0, meaning: stopped for no particular reason
5798 other than GDB's request. */
5799 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
5800 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
5801 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5802 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
5803 {
c4464ade 5804 stop_print_frame = true;
22bcd14b 5805 stop_waiting (ecs);
36fa8042
PA
5806 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5807 return;
5808 }
5809
488f131b 5810 /* See if something interesting happened to the non-current thread. If
b40c7d58 5811 so, then switch to that thread. */
d7e15655 5812 if (ecs->ptid != inferior_ptid)
488f131b 5813 {
1eb8556f 5814 infrun_debug_printf ("context switch");
527159b7 5815
00431a78 5816 context_switch (ecs);
c5aa993b 5817
9a4105ab 5818 if (deprecated_context_hook)
00431a78 5819 deprecated_context_hook (ecs->event_thread->global_num);
488f131b 5820 }
c906108c 5821
568d6575
UW
5822 /* At this point, get hold of the now-current thread's frame. */
5823 frame = get_current_frame ();
5824 gdbarch = get_frame_arch (frame);
5825
2adfaa28 5826 /* Pull the single step breakpoints out of the target. */
af48d08f 5827 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
488f131b 5828 {
af48d08f 5829 struct regcache *regcache;
af48d08f 5830 CORE_ADDR pc;
2adfaa28 5831
00431a78 5832 regcache = get_thread_regcache (ecs->event_thread);
8b86c959
YQ
5833 const address_space *aspace = regcache->aspace ();
5834
af48d08f 5835 pc = regcache_read_pc (regcache);
34b7e8a6 5836
af48d08f
PA
5837 /* However, before doing so, if this single-step breakpoint was
5838 actually for another thread, set this thread up for moving
5839 past it. */
5840 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
5841 aspace, pc))
5842 {
5843 if (single_step_breakpoint_inserted_here_p (aspace, pc))
2adfaa28 5844 {
1eb8556f
SM
5845 infrun_debug_printf ("[%s] hit another thread's single-step "
5846 "breakpoint",
5847 target_pid_to_str (ecs->ptid).c_str ());
af48d08f
PA
5848 ecs->hit_singlestep_breakpoint = 1;
5849 }
5850 }
5851 else
5852 {
1eb8556f
SM
5853 infrun_debug_printf ("[%s] hit its single-step breakpoint",
5854 target_pid_to_str (ecs->ptid).c_str ());
2adfaa28 5855 }
488f131b 5856 }
af48d08f 5857 delete_just_stopped_threads_single_step_breakpoints ();
c906108c 5858
963f9c80
PA
5859 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5860 && ecs->event_thread->control.trap_expected
5861 && ecs->event_thread->stepping_over_watchpoint)
d983da9c
DJ
5862 stopped_by_watchpoint = 0;
5863 else
5864 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
5865
5866 /* If necessary, step over this watchpoint. We'll be back to display
5867 it in a moment. */
5868 if (stopped_by_watchpoint
9aed480c 5869 && (target_have_steppable_watchpoint ()
568d6575 5870 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
488f131b 5871 {
 5872 /* At this point, we are stopped at an instruction which has
 5873 attempted to write to a piece of memory under control of
5874 a watchpoint. The instruction hasn't actually executed
5875 yet. If we were to evaluate the watchpoint expression
5876 now, we would get the old value, and therefore no change
5877 would seem to have occurred.
5878
5879 In order to make watchpoints work `right', we really need
5880 to complete the memory write, and then evaluate the
 5881 watchpoint expression. We do this by single-stepping the
 5882 target.
5883
 5884 It may not be necessary to disable the watchpoint to step over
 5885 it. For example, the PA can (with some kernel cooperation)
5886 single step over a watchpoint without disabling the watchpoint.
5887
5888 It is far more common to need to disable a watchpoint to step
5889 the inferior over it. If we have non-steppable watchpoints,
 5890 we must disable the current watchpoint; it's simplest to
 5891 disable all watchpoints.
5892
5893 Any breakpoint at PC must also be stepped over -- if there's
5894 one, it will have already triggered before the watchpoint
5895 triggered, and we either already reported it to the user, or
5896 it didn't cause a stop and we called keep_going. In either
5897 case, if there was a breakpoint at PC, we must be trying to
5898 step past it. */
5899 ecs->event_thread->stepping_over_watchpoint = 1;
5900 keep_going (ecs);
488f131b
JB
5901 return;
5902 }
5903
4e1c45ea 5904 ecs->event_thread->stepping_over_breakpoint = 0;
963f9c80 5905 ecs->event_thread->stepping_over_watchpoint = 0;
16c381f0
JK
5906 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
5907 ecs->event_thread->control.stop_step = 0;
c4464ade 5908 stop_print_frame = true;
488f131b 5909 stopped_by_random_signal = 0;
ddfe970e 5910 bpstat stop_chain = NULL;
488f131b 5911
edb3359d
DJ
5912 /* Hide inlined functions starting here, unless we just performed stepi or
5913 nexti. After stepi and nexti, always show the innermost frame (not any
5914 inline function call sites). */
16c381f0 5915 if (ecs->event_thread->control.step_range_end != 1)
0574c78f 5916 {
00431a78
PA
5917 const address_space *aspace
5918 = get_thread_regcache (ecs->event_thread)->aspace ();
0574c78f
GB
5919
5920 /* skip_inline_frames is expensive, so we avoid it if we can
5921 determine that the address is one where functions cannot have
5922 been inlined. This improves performance with inferiors that
5923 load a lot of shared libraries, because the solib event
5924 breakpoint is defined as the address of a function (i.e. not
5925 inline). Note that we have to check the previous PC as well
5926 as the current one to catch cases when we have just
5927 single-stepped off a breakpoint prior to reinstating it.
5928 Note that we're assuming that the code we single-step to is
5929 not inline, but that's not definitive: there's nothing
5930 preventing the event breakpoint function from containing
5931 inlined code, and the single-step ending up there. If the
5932 user had set a breakpoint on that inlined code, the missing
5933 skip_inline_frames call would break things. Fortunately
5934 that's an extremely unlikely scenario. */
 5935 if (!pc_at_non_inline_function (aspace,
 5936 ecs->event_thread->suspend.stop_pc,
 5937 &ecs->ws)
 5938 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5939 && ecs->event_thread->control.trap_expected
5940 && pc_at_non_inline_function (aspace,
5941 ecs->event_thread->prev_pc,
09ac7c10 5942 &ecs->ws)))
1c5a993e 5943 {
f2ffa92b
PA
5944 stop_chain = build_bpstat_chain (aspace,
5945 ecs->event_thread->suspend.stop_pc,
5946 &ecs->ws);
00431a78 5947 skip_inline_frames (ecs->event_thread, stop_chain);
1c5a993e
MR
5948
5949 /* Re-fetch current thread's frame in case that invalidated
5950 the frame cache. */
5951 frame = get_current_frame ();
5952 gdbarch = get_frame_arch (frame);
5953 }
0574c78f 5954 }
edb3359d 5955
a493e3e2 5956 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
16c381f0 5957 && ecs->event_thread->control.trap_expected
568d6575 5958 && gdbarch_single_step_through_delay_p (gdbarch)
4e1c45ea 5959 && currently_stepping (ecs->event_thread))
3352ef37 5960 {
b50d7442 5961 /* We're trying to step off a breakpoint. Turns out that we're
3352ef37 5962 also on an instruction that needs to be stepped multiple
1777feb0 5963 times before it has been fully executed. E.g., architectures
3352ef37
AC
5964 with a delay slot. It needs to be stepped twice, once for
5965 the instruction and once for the delay slot. */
5966 int step_through_delay
568d6575 5967 = gdbarch_single_step_through_delay (gdbarch, frame);
abbb1732 5968
1eb8556f
SM
5969 if (step_through_delay)
5970 infrun_debug_printf ("step through delay");
5971
16c381f0
JK
5972 if (ecs->event_thread->control.step_range_end == 0
5973 && step_through_delay)
3352ef37
AC
5974 {
5975 /* The user issued a continue when stopped at a breakpoint.
5976 Set up for another trap and get out of here. */
dda83cd7
SM
5977 ecs->event_thread->stepping_over_breakpoint = 1;
5978 keep_going (ecs);
5979 return;
3352ef37
AC
5980 }
5981 else if (step_through_delay)
5982 {
5983 /* The user issued a step when stopped at a breakpoint.
5984 Maybe we should stop, maybe we should not - the delay
5985 slot *might* correspond to a line of source. In any
ca67fcb8
VP
5986 case, don't decide that here, just set
5987 ecs->stepping_over_breakpoint, making sure we
5988 single-step again before breakpoints are re-inserted. */
4e1c45ea 5989 ecs->event_thread->stepping_over_breakpoint = 1;
3352ef37
AC
5990 }
5991 }
5992
ab04a2af
TT
5993 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
5994 handles this event. */
5995 ecs->event_thread->control.stop_bpstat
a01bda52 5996 = bpstat_stop_status (get_current_regcache ()->aspace (),
f2ffa92b
PA
5997 ecs->event_thread->suspend.stop_pc,
5998 ecs->event_thread, &ecs->ws, stop_chain);
db82e815 5999
ab04a2af
TT
6000 /* The following is needed in case a breakpoint condition called a
6001 function. */
c4464ade 6002 stop_print_frame = true;
73dd234f 6003
ab04a2af
TT
6004 /* This is where we handle "moribund" watchpoints. Unlike
6005 software breakpoint traps, hardware watchpoint traps are
6006 always distinguishable from random traps. If no high-level
6007 watchpoint is associated with the reported stop data address
6008 anymore, then the bpstat does not explain the signal ---
6009 simply make sure to ignore it if `stopped_by_watchpoint' is
6010 set. */
6011
1eb8556f 6012 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
47591c29 6013 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
427cd150 6014 GDB_SIGNAL_TRAP)
ab04a2af 6015 && stopped_by_watchpoint)
1eb8556f
SM
6016 {
6017 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
6018 "ignoring");
6019 }
73dd234f 6020
bac7d97b 6021 /* NOTE: cagney/2003-03-29: These checks for a random signal
ab04a2af
TT
6022 at one stage in the past included checks for an inferior
6023 function call's call dummy's return breakpoint. The original
6024 comment, that went with the test, read:
03cebad2 6025
ab04a2af
TT
6026 ``End of a stack dummy. Some systems (e.g. Sony news) give
6027 another signal besides SIGTRAP, so check here as well as
6028 above.''
73dd234f 6029
ab04a2af
TT
6030 If someone ever tries to get call dummies on a
6031 non-executable stack to work (where the target would stop
6032 with something like a SIGSEGV), then those tests might need
6033 to be re-instated. Given, however, that the tests were only
6034 enabled when momentary breakpoints were not being used, I
6035 suspect that it won't be the case.
488f131b 6036
ab04a2af
TT
6037 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6038 be necessary for call dummies on a non-executable stack on
6039 SPARC. */
488f131b 6040
bac7d97b 6041 /* See if the breakpoints module can explain the signal. */
47591c29
PA
6042 random_signal
6043 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6044 ecs->event_thread->suspend.stop_signal);
bac7d97b 6045
1cf4d951
PA
6046 /* Maybe this was a trap for a software breakpoint that has since
6047 been removed. */
6048 if (random_signal && target_stopped_by_sw_breakpoint ())
6049 {
5133a315
LM
6050 if (gdbarch_program_breakpoint_here_p (gdbarch,
6051 ecs->event_thread->suspend.stop_pc))
1cf4d951
PA
6052 {
6053 struct regcache *regcache;
6054 int decr_pc;
6055
6056 /* Re-adjust PC to what the program would see if GDB was not
6057 debugging it. */
00431a78 6058 regcache = get_thread_regcache (ecs->event_thread);
527a273a 6059 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1cf4d951
PA
6060 if (decr_pc != 0)
6061 {
07036511
TT
6062 gdb::optional<scoped_restore_tmpl<int>>
6063 restore_operation_disable;
1cf4d951
PA
6064
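		  /* When record-full is in use, disable recording of
		     GDB's own operations while we rewrite the PC, so
		     that the adjustment below is not itself recorded.  */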
6065 if (record_full_is_used ())
07036511
TT
6066 restore_operation_disable.emplace
6067 (record_full_gdb_operation_disable_set ());
1cf4d951 6068
f2ffa92b
PA
6069 regcache_write_pc (regcache,
6070 ecs->event_thread->suspend.stop_pc + decr_pc);
1cf4d951
PA
6071 }
6072 }
6073 else
6074 {
6075 /* A delayed software breakpoint event. Ignore the trap. */
1eb8556f 6076 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
1cf4d951
PA
6077 random_signal = 0;
6078 }
6079 }
6080
6081 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6082 has since been removed. */
6083 if (random_signal && target_stopped_by_hw_breakpoint ())
6084 {
6085 /* A delayed hardware breakpoint event. Ignore the trap. */
1eb8556f
SM
6086 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
6087 "trap, ignoring");
1cf4d951
PA
6088 random_signal = 0;
6089 }
6090
bac7d97b
PA
6091 /* If not, perhaps stepping/nexting can. */
6092 if (random_signal)
6093 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6094 && currently_stepping (ecs->event_thread));
ab04a2af 6095
2adfaa28
PA
6096 /* Perhaps the thread hit a single-step breakpoint of _another_
6097 thread. Single-step breakpoints are transparent to the
6098 breakpoints module. */
6099 if (random_signal)
6100 random_signal = !ecs->hit_singlestep_breakpoint;
6101
bac7d97b
PA
6102 /* No? Perhaps we got a moribund watchpoint. */
6103 if (random_signal)
6104 random_signal = !stopped_by_watchpoint;
ab04a2af 6105
c65d6b55
PA
6106 /* Always stop if the user explicitly requested this thread to
6107 remain stopped. */
6108 if (ecs->event_thread->stop_requested)
6109 {
6110 random_signal = 1;
1eb8556f 6111 infrun_debug_printf ("user-requested stop");
c65d6b55
PA
6112 }
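  /* At this point RANDOM_SIGNAL is set if none of the checks above
     (breakpoints, stepping, another thread's single-step breakpoint,
     watchpoints) explains the trap, or if the user explicitly asked
     this thread to remain stopped.  */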
6113
488f131b
JB
6114 /* For the program's own signals, act according to
6115 the signal handling tables. */
6116
ce12b012 6117 if (random_signal)
488f131b
JB
6118 {
6119 /* Signal not for debugging purposes. */
5b6d1e4f 6120 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
c9737c08 6121 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
488f131b 6122
1eb8556f
SM
6123 infrun_debug_printf ("random signal (%s)",
6124 gdb_signal_to_symbol_string (stop_signal));
527159b7 6125
488f131b
JB
6126 stopped_by_random_signal = 1;
6127
252fbfc8
PA
6128 /* Always stop on signals if we're either just gaining control
6129 of the program, or the user explicitly requested this thread
6130 to remain stopped. */
d6b48e9c 6131 if (stop_soon != NO_STOP_QUIETLY
252fbfc8 6132 || ecs->event_thread->stop_requested
24291992 6133 || (!inf->detaching
16c381f0 6134 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
488f131b 6135 {
22bcd14b 6136 stop_waiting (ecs);
488f131b
JB
6137 return;
6138 }
b57bacec
PA
6139
6140 /* Notify observers the signal has "handle print" set. Note we
6141 returned early above if stopping; normal_stop handles the
6142 printing in that case. */
6143 if (signal_print[ecs->event_thread->suspend.stop_signal])
6144 {
6145 /* The signal table tells us to print about this signal. */
223ffa71 6146 target_terminal::ours_for_output ();
76727919 6147 gdb::observers::signal_received.notify (ecs->event_thread->suspend.stop_signal);
223ffa71 6148 target_terminal::inferior ();
b57bacec 6149 }
488f131b
JB
6150
6151 /* Clear the signal if it should not be passed. */
16c381f0 6152 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
a493e3e2 6153 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
488f131b 6154
f2ffa92b 6155 if (ecs->event_thread->prev_pc == ecs->event_thread->suspend.stop_pc
16c381f0 6156 && ecs->event_thread->control.trap_expected
8358c15c 6157 && ecs->event_thread->control.step_resume_breakpoint == NULL)
68f53502
AC
6158 {
6159 /* We were just starting a new sequence, attempting to
6160 single-step off of a breakpoint and expecting a SIGTRAP.
237fc4c9 6161 Instead this signal arrives. This signal will take us out
68f53502
AC
6162 of the stepping range so GDB needs to remember to, when
6163 the signal handler returns, resume stepping off that
6164 breakpoint. */
6165 /* To simplify things, "continue" is forced to use the same
6166 code paths as single-step - set a breakpoint at the
6167 signal return address and then, once hit, step off that
6168 breakpoint. */
1eb8556f 6169 infrun_debug_printf ("signal arrived while stepping over breakpoint");
d3169d93 6170
2c03e5be 6171 insert_hp_step_resume_breakpoint_at_frame (frame);
4e1c45ea 6172 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6173 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6174 ecs->event_thread->control.trap_expected = 0;
d137e6dc
PA
6175
6176 /* If we were nexting/stepping some other thread, switch to
6177 it, so that we don't continue it, losing control. */
6178 if (!switch_back_to_stepped_thread (ecs))
6179 keep_going (ecs);
9d799f85 6180 return;
68f53502 6181 }
9d799f85 6182
e5f8a7cc 6183 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
f2ffa92b
PA
6184 && (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6185 ecs->event_thread)
e5f8a7cc 6186 || ecs->event_thread->control.step_range_end == 1)
edb3359d 6187 && frame_id_eq (get_stack_frame_id (frame),
16c381f0 6188 ecs->event_thread->control.step_stack_frame_id)
8358c15c 6189 && ecs->event_thread->control.step_resume_breakpoint == NULL)
d303a6c7
AC
6190 {
6191 /* The inferior is about to take a signal that will take it
6192 out of the single step range. Set a breakpoint at the
6193 current PC (which is presumably where the signal handler
6194 will eventually return) and then allow the inferior to
6195 run free.
6196
6197 Note that this is only needed for a signal delivered
6198 while in the single-step range. Nested signals aren't a
6199 problem as they eventually all return. */
1eb8556f 6200 infrun_debug_printf ("signal may take us out of single-step range");
237fc4c9 6201
372316f1 6202 clear_step_over_info ();
2c03e5be 6203 insert_hp_step_resume_breakpoint_at_frame (frame);
e5f8a7cc 6204 ecs->event_thread->step_after_step_resume_breakpoint = 1;
2455069d
UW
6205 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6206 ecs->event_thread->control.trap_expected = 0;
9d799f85
AC
6207 keep_going (ecs);
6208 return;
d303a6c7 6209 }
9d799f85 6210
85102364 6211 /* Note: step_resume_breakpoint may be non-NULL. This occurs
9d799f85
AC
6212 when either there's a nested signal, or when there's a
6213 pending signal enabled just as the signal handler returns
6214 (leaving the inferior at the step-resume-breakpoint without
6215 actually executing it). Either way continue until the
6216 breakpoint is really hit. */
c447ac0b
PA
6217
6218 if (!switch_back_to_stepped_thread (ecs))
6219 {
1eb8556f 6220 infrun_debug_printf ("random signal, keep going");
c447ac0b
PA
6221
6222 keep_going (ecs);
6223 }
6224 return;
488f131b 6225 }
94c57d6a
PA
6226
6227 process_event_stop_test (ecs);
6228}
6229
6230/* Come here when we've got some debug event / signal we can explain
6231 (IOW, not a random signal), and test whether it should cause a
6232 stop, or whether we should resume the inferior (transparently).
6233 E.g., could be a breakpoint whose condition evaluates false; we
6234 could be still stepping within the line; etc. */
6235
6236static void
6237process_event_stop_test (struct execution_control_state *ecs)
6238{
6239 struct symtab_and_line stop_pc_sal;
6240 struct frame_info *frame;
6241 struct gdbarch *gdbarch;
cdaa5b73
PA
6242 CORE_ADDR jmp_buf_pc;
6243 struct bpstat_what what;
94c57d6a 6244
cdaa5b73 6245 /* Handle cases caused by hitting a breakpoint. */
611c83ae 6246
cdaa5b73
PA
6247 frame = get_current_frame ();
6248 gdbarch = get_frame_arch (frame);
fcf3daef 6249
cdaa5b73 6250 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
611c83ae 6251
cdaa5b73
PA
6252 if (what.call_dummy)
6253 {
6254 stop_stack_dummy = what.call_dummy;
6255 }
186c406b 6256
243a9253
PA
6257 /* A few breakpoint types have callbacks associated (e.g.,
6258 bp_jit_event). Run them now. */
6259 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6260
cdaa5b73
PA
6261 /* If we hit an internal event that triggers symbol changes, the
6262 current frame will be invalidated within bpstat_what (e.g., if we
6263 hit an internal solib event). Re-fetch it. */
6264 frame = get_current_frame ();
6265 gdbarch = get_frame_arch (frame);
e2e4d78b 6266
cdaa5b73
PA
6267 switch (what.main_action)
6268 {
6269 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
6270 /* If we hit the breakpoint at longjmp while stepping, we
6271 install a momentary breakpoint at the target of the
6272 jmp_buf. */
186c406b 6273
1eb8556f 6274 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
186c406b 6275
cdaa5b73 6276 ecs->event_thread->stepping_over_breakpoint = 1;
611c83ae 6277
cdaa5b73
PA
6278 if (what.is_longjmp)
6279 {
6280 struct value *arg_value;
6281
6282 /* If we set the longjmp breakpoint via a SystemTap probe,
6283 then use it to extract the arguments. The destination PC
6284 is the third argument to the probe. */
6285 arg_value = probe_safe_evaluate_at_pc (frame, 2);
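	    /* Probe arguments are numbered from zero, so index 2 above
	       is the third argument, i.e. the destination PC.  */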
6286 if (arg_value)
8fa0c4f8
AA
6287 {
6288 jmp_buf_pc = value_as_address (arg_value);
6289 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
6290 }
cdaa5b73
PA
6291 else if (!gdbarch_get_longjmp_target_p (gdbarch)
6292 || !gdbarch_get_longjmp_target (gdbarch,
6293 frame, &jmp_buf_pc))
e2e4d78b 6294 {
1eb8556f
SM
6295 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
6296 "(!gdbarch_get_longjmp_target)");
cdaa5b73
PA
6297 keep_going (ecs);
6298 return;
e2e4d78b 6299 }
e2e4d78b 6300
cdaa5b73
PA
6301 /* Insert a breakpoint at resume address. */
6302 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
6303 }
6304 else
6305 check_exception_resume (ecs, frame);
6306 keep_going (ecs);
6307 return;
e81a37f7 6308
cdaa5b73
PA
6309 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
6310 {
6311 struct frame_info *init_frame;
e81a37f7 6312
cdaa5b73 6313 /* There are several cases to consider.
c906108c 6314
cdaa5b73
PA
6315 1. The initiating frame no longer exists. In this case we
6316 must stop, because the exception or longjmp has gone too
6317 far.
2c03e5be 6318
cdaa5b73
PA
6319 2. The initiating frame exists, and is the same as the
6320 current frame. We stop, because the exception or longjmp
6321 has been caught.
2c03e5be 6322
cdaa5b73
PA
6323 3. The initiating frame exists and is different from the
6324 current frame. This means the exception or longjmp has
6325 been caught beneath the initiating frame, so keep going.
c906108c 6326
cdaa5b73
PA
6327 4. The longjmp breakpoint has been placed just to protect
6328 against stale dummy frames, and the user is not interested in
6329 stopping around longjmps. */
c5aa993b 6330
1eb8556f 6331 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
c5aa993b 6332
cdaa5b73
PA
6333 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
6334 != NULL);
6335 delete_exception_resume_breakpoint (ecs->event_thread);
c5aa993b 6336
cdaa5b73
PA
6337 if (what.is_longjmp)
6338 {
b67a2c6f 6339 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
c5aa993b 6340
cdaa5b73 6341 if (!frame_id_p (ecs->event_thread->initiating_frame))
e5ef252a 6342 {
cdaa5b73
PA
6343 /* Case 4. */
6344 keep_going (ecs);
6345 return;
e5ef252a 6346 }
cdaa5b73 6347 }
c5aa993b 6348
cdaa5b73 6349 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
527159b7 6350
cdaa5b73
PA
6351 if (init_frame)
6352 {
6353 struct frame_id current_id
6354 = get_frame_id (get_current_frame ());
6355 if (frame_id_eq (current_id,
6356 ecs->event_thread->initiating_frame))
6357 {
6358 /* Case 2. Fall through. */
6359 }
6360 else
6361 {
6362 /* Case 3. */
6363 keep_going (ecs);
6364 return;
6365 }
68f53502 6366 }
488f131b 6367
cdaa5b73
PA
6368 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
6369 exists. */
6370 delete_step_resume_breakpoint (ecs->event_thread);
e5ef252a 6371
bdc36728 6372 end_stepping_range (ecs);
cdaa5b73
PA
6373 }
6374 return;
e5ef252a 6375
cdaa5b73 6376 case BPSTAT_WHAT_SINGLE:
1eb8556f 6377 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
cdaa5b73
PA
6378 ecs->event_thread->stepping_over_breakpoint = 1;
6379 /* Still need to check other stuff, at least the case where we
6380 are stepping and step out of the right range. */
6381 break;
e5ef252a 6382
cdaa5b73 6383 case BPSTAT_WHAT_STEP_RESUME:
1eb8556f 6384 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
e5ef252a 6385
cdaa5b73
PA
6386 delete_step_resume_breakpoint (ecs->event_thread);
6387 if (ecs->event_thread->control.proceed_to_finish
6388 && execution_direction == EXEC_REVERSE)
6389 {
6390 struct thread_info *tp = ecs->event_thread;
6391
6392 /* We are finishing a function in reverse, and just hit the
6393 step-resume breakpoint at the start address of the
6394 function, and we're almost there -- just need to back up
6395 by one more single-step, which should take us back to the
6396 function call. */
6397 tp->control.step_range_start = tp->control.step_range_end = 1;
6398 keep_going (ecs);
e5ef252a 6399 return;
cdaa5b73
PA
6400 }
6401 fill_in_stop_func (gdbarch, ecs);
f2ffa92b 6402 if (ecs->event_thread->suspend.stop_pc == ecs->stop_func_start
cdaa5b73
PA
6403 && execution_direction == EXEC_REVERSE)
6404 {
6405 /* We are stepping over a function call in reverse, and just
6406 hit the step-resume breakpoint at the start address of
6407 the function. Go back to single-stepping, which should
6408 take us back to the function call. */
6409 ecs->event_thread->stepping_over_breakpoint = 1;
6410 keep_going (ecs);
6411 return;
6412 }
6413 break;
e5ef252a 6414
cdaa5b73 6415 case BPSTAT_WHAT_STOP_NOISY:
1eb8556f 6416 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
c4464ade 6417 stop_print_frame = true;
e5ef252a 6418
33bf4c5c 6419 /* Assume the thread stopped for a breakpoint. We'll still check
99619bea
PA
6420 whether a/the breakpoint is there when the thread is next
6421 resumed. */
6422 ecs->event_thread->stepping_over_breakpoint = 1;
e5ef252a 6423
22bcd14b 6424 stop_waiting (ecs);
cdaa5b73 6425 return;
e5ef252a 6426
cdaa5b73 6427 case BPSTAT_WHAT_STOP_SILENT:
1eb8556f 6428 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
c4464ade 6429 stop_print_frame = false;
e5ef252a 6430
33bf4c5c 6431 /* Assume the thread stopped for a breakpoint. We'll still check
99619bea
PA
6432 whether a/the breakpoint is there when the thread is next
6433 resumed. */
6434 ecs->event_thread->stepping_over_breakpoint = 1;
22bcd14b 6435 stop_waiting (ecs);
cdaa5b73
PA
6436 return;
6437
6438 case BPSTAT_WHAT_HP_STEP_RESUME:
1eb8556f 6439 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
cdaa5b73
PA
6440
6441 delete_step_resume_breakpoint (ecs->event_thread);
6442 if (ecs->event_thread->step_after_step_resume_breakpoint)
6443 {
6444 /* Back when the step-resume breakpoint was inserted, we
6445 were trying to single-step off a breakpoint. Go back to
6446 doing that. */
6447 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6448 ecs->event_thread->stepping_over_breakpoint = 1;
6449 keep_going (ecs);
6450 return;
e5ef252a 6451 }
cdaa5b73
PA
6452 break;
6453
6454 case BPSTAT_WHAT_KEEP_CHECKING:
6455 break;
e5ef252a 6456 }
c906108c 6457
af48d08f
PA
6458 /* If we stepped a permanent breakpoint and we had a high priority
6459 step-resume breakpoint for the address we stepped, but we didn't
6460 hit it, then we must have stepped into the signal handler. The
6461 step-resume was only necessary to catch the case of _not_
6462 stepping into the handler, so delete it, and fall through to
6463 checking whether the step finished. */
6464 if (ecs->event_thread->stepped_breakpoint)
6465 {
6466 struct breakpoint *sr_bp
6467 = ecs->event_thread->control.step_resume_breakpoint;
6468
8d707a12
PA
6469 if (sr_bp != NULL
6470 && sr_bp->loc->permanent
af48d08f
PA
6471 && sr_bp->type == bp_hp_step_resume
6472 && sr_bp->loc->address == ecs->event_thread->prev_pc)
6473 {
1eb8556f 6474 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
af48d08f
PA
6475 delete_step_resume_breakpoint (ecs->event_thread);
6476 ecs->event_thread->step_after_step_resume_breakpoint = 0;
6477 }
6478 }
6479
cdaa5b73
PA
6480 /* We come here if we hit a breakpoint but should not stop for it.
6481 Possibly we also were stepping and should stop for that. So fall
6482 through and test for stepping. But, if not stepping, do not
6483 stop. */
c906108c 6484
a7212384
UW
6485 /* In all-stop mode, if we're currently stepping but have stopped in
6486 some other thread, we need to switch back to the stepped thread. */
c447ac0b
PA
6487 if (switch_back_to_stepped_thread (ecs))
6488 return;
776f04fa 6489
8358c15c 6490 if (ecs->event_thread->control.step_resume_breakpoint)
488f131b 6491 {
1eb8556f 6492 infrun_debug_printf ("step-resume breakpoint is inserted");
527159b7 6493
488f131b 6494 /* Having a step-resume breakpoint overrides anything
dda83cd7
SM
6495 else having to do with stepping commands until
6496 that breakpoint is reached. */
488f131b
JB
6497 keep_going (ecs);
6498 return;
6499 }
c5aa993b 6500
16c381f0 6501 if (ecs->event_thread->control.step_range_end == 0)
488f131b 6502 {
1eb8556f 6503 infrun_debug_printf ("no stepping, continue");
488f131b 6504 /* Likewise if we aren't even stepping. */
488f131b
JB
6505 keep_going (ecs);
6506 return;
6507 }
c5aa993b 6508
4b7703ad
JB
6509 /* Re-fetch current thread's frame in case the code above caused
6510 the frame cache to be re-initialized, making our FRAME variable
6511 a dangling pointer. */
6512 frame = get_current_frame ();
628fe4e4 6513 gdbarch = get_frame_arch (frame);
7e324e48 6514 fill_in_stop_func (gdbarch, ecs);
4b7703ad 6515
488f131b 6516 /* If stepping through a line, keep going if still within it.
c906108c 6517
488f131b
JB
6518 Note that step_range_end is the address of the first instruction
6519 beyond the step range, and NOT the address of the last instruction
31410e84
MS
6520 within it!
6521
6522 Note also that during reverse execution, we may be stepping
6523 through a function epilogue and therefore must detect when
6524 the current-frame changes in the middle of a line. */
6525
f2ffa92b
PA
6526 if (pc_in_thread_step_range (ecs->event_thread->suspend.stop_pc,
6527 ecs->event_thread)
31410e84 6528 && (execution_direction != EXEC_REVERSE
388a8562 6529 || frame_id_eq (get_frame_id (frame),
16c381f0 6530 ecs->event_thread->control.step_frame_id)))
488f131b 6531 {
1eb8556f
SM
6532 infrun_debug_printf
6533 ("stepping inside range [%s-%s]",
6534 paddress (gdbarch, ecs->event_thread->control.step_range_start),
6535 paddress (gdbarch, ecs->event_thread->control.step_range_end));
b2175913 6536
c1e36e3e
PA
6537 /* Tentatively re-enable range stepping; `resume' disables it if
6538 necessary (e.g., if we're stepping over a breakpoint or we
6539 have software watchpoints). */
6540 ecs->event_thread->control.may_range_step = 1;
6541
b2175913
MS
6542 /* When stepping backward, stop at beginning of line range
6543 (unless it's the function entry point, in which case
6544 keep going back to the call point). */
f2ffa92b 6545 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
16c381f0 6546 if (stop_pc == ecs->event_thread->control.step_range_start
b2175913
MS
6547 && stop_pc != ecs->stop_func_start
6548 && execution_direction == EXEC_REVERSE)
bdc36728 6549 end_stepping_range (ecs);
b2175913
MS
6550 else
6551 keep_going (ecs);
6552
488f131b
JB
6553 return;
6554 }
c5aa993b 6555
488f131b 6556 /* We stepped out of the stepping range. */
c906108c 6557
488f131b 6558 /* If we are stepping at the source level and entered the runtime
388a8562
MS
6559 loader dynamic symbol resolution code...
6560
6561 EXEC_FORWARD: we keep on single stepping until we exit the run
6562 time loader code and reach the callee's address.
6563
6564 EXEC_REVERSE: we've already executed the callee (backward), and
6565 the runtime loader code is handled just like any other
6566 undebuggable function call. Now we need only keep stepping
6567 backward through the trampoline code, and that's handled further
6568 down, so there is nothing for us to do here. */
6569
6570 if (execution_direction != EXEC_REVERSE
16c381f0 6571 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
f2ffa92b 6572 && in_solib_dynsym_resolve_code (ecs->event_thread->suspend.stop_pc))
488f131b 6573 {
4c8c40e6 6574 CORE_ADDR pc_after_resolver =
f2ffa92b
PA
6575 gdbarch_skip_solib_resolver (gdbarch,
6576 ecs->event_thread->suspend.stop_pc);
c906108c 6577
1eb8556f 6578 infrun_debug_printf ("stepped into dynsym resolve code");
527159b7 6579
488f131b
JB
6580 if (pc_after_resolver)
6581 {
6582 /* Set up a step-resume breakpoint at the address
6583 indicated by SKIP_SOLIB_RESOLVER. */
51abb421 6584 symtab_and_line sr_sal;
488f131b 6585 sr_sal.pc = pc_after_resolver;
6c95b8df 6586 sr_sal.pspace = get_frame_program_space (frame);
488f131b 6587
a6d9a66e
UW
6588 insert_step_resume_breakpoint_at_sal (gdbarch,
6589 sr_sal, null_frame_id);
c5aa993b 6590 }
c906108c 6591
488f131b
JB
6592 keep_going (ecs);
6593 return;
6594 }
c906108c 6595
1d509aa6
MM
6596 /* Step through an indirect branch thunk. */
6597 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
f2ffa92b
PA
6598 && gdbarch_in_indirect_branch_thunk (gdbarch,
6599 ecs->event_thread->suspend.stop_pc))
1d509aa6 6600 {
1eb8556f 6601 infrun_debug_printf ("stepped into indirect branch thunk");
1d509aa6
MM
6602 keep_going (ecs);
6603 return;
6604 }
6605
16c381f0
JK
6606 if (ecs->event_thread->control.step_range_end != 1
6607 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
6608 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
568d6575 6609 && get_frame_type (frame) == SIGTRAMP_FRAME)
488f131b 6610 {
1eb8556f 6611 infrun_debug_printf ("stepped into signal trampoline");
42edda50 6612 /* The inferior, while doing a "step" or "next", has ended up in
dda83cd7
SM
6613 a signal trampoline (either by a signal being delivered or by
6614 the signal handler returning). Just single-step until the
6615 inferior leaves the trampoline (either by calling the handler
6616 or returning). */
488f131b
JB
6617 keep_going (ecs);
6618 return;
6619 }
c906108c 6620
14132e89
MR
6621 /* If we're in the return path from a shared library trampoline,
6622 we want to proceed through the trampoline when stepping. */
6623 /* macro/2012-04-25: This needs to come before the subroutine
6624 call check below as on some targets return trampolines look
6625 like subroutine calls (MIPS16 return thunks). */
6626 if (gdbarch_in_solib_return_trampoline (gdbarch,
f2ffa92b
PA
6627 ecs->event_thread->suspend.stop_pc,
6628 ecs->stop_func_name)
14132e89
MR
6629 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
6630 {
6631 /* Determine where this trampoline returns. */
f2ffa92b
PA
6632 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6633 CORE_ADDR real_stop_pc
6634 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
14132e89 6635
1eb8556f 6636 infrun_debug_printf ("stepped into solib return tramp");
14132e89
MR
6637
6638 /* Only proceed through if we know where it's going. */
6639 if (real_stop_pc)
6640 {
6641 /* And put the step-breakpoint there and go until there. */
51abb421 6642 symtab_and_line sr_sal;
14132e89
MR
6643 sr_sal.pc = real_stop_pc;
6644 sr_sal.section = find_pc_overlay (sr_sal.pc);
6645 sr_sal.pspace = get_frame_program_space (frame);
6646
6647 /* Do not specify what the fp should be when we stop since
6648 on some machines the prologue is where the new fp value
6649 is established. */
6650 insert_step_resume_breakpoint_at_sal (gdbarch,
6651 sr_sal, null_frame_id);
6652
6653 /* Restart without fiddling with the step ranges or
6654 other state. */
6655 keep_going (ecs);
6656 return;
6657 }
6658 }
6659
c17eaafe
DJ
6660 /* Check for subroutine calls. The check for the current frame
6661 equalling the step ID is not necessary - the check of the
6662 previous frame's ID is sufficient - but it is a common case and
6663 cheaper than checking the previous frame's ID.
14e60db5
DJ
6664
6665 NOTE: frame_id_eq will never report two invalid frame IDs as
6666 being equal, so to get into this block, both the current and
6667 previous frame must have valid frame IDs. */
005ca36a
JB
6668 /* The outer_frame_id check is a heuristic to detect stepping
6669 through startup code. If we step over an instruction which
6670 sets the stack pointer from an invalid value to a valid value,
6671 we may detect that as a subroutine call from the mythical
6672 "outermost" function. This could be fixed by marking
6673 outermost frames as !stack_p,code_p,special_p. Then the
6674 initial outermost frame, before sp was valid, would
ce6cca6d 6675 have code_addr == &_start. See the comment in frame_id_eq
005ca36a 6676 for more. */
edb3359d 6677 if (!frame_id_eq (get_stack_frame_id (frame),
16c381f0 6678 ecs->event_thread->control.step_stack_frame_id)
005ca36a 6679 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
16c381f0
JK
6680 ecs->event_thread->control.step_stack_frame_id)
6681 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
005ca36a 6682 outer_frame_id)
885eeb5b 6683 || (ecs->event_thread->control.step_start_function
f2ffa92b 6684 != find_pc_function (ecs->event_thread->suspend.stop_pc)))))
488f131b 6685 {
f2ffa92b 6686 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
95918acb 6687 CORE_ADDR real_stop_pc;
8fb3e588 6688
1eb8556f 6689 infrun_debug_printf ("stepped into subroutine");
527159b7 6690
b7a084be 6691 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
95918acb
AC
6692 {
6693 /* I presume that step_over_calls is only 0 when we're
6694 supposed to be stepping at the assembly language level
6695 ("stepi"). Just stop. */
388a8562 6696 /* And this works the same backward as frontward. MVS */
bdc36728 6697 end_stepping_range (ecs);
95918acb
AC
6698 return;
6699 }
8fb3e588 6700
388a8562
MS
6701 /* Reverse stepping through solib trampolines. */
6702
6703 if (execution_direction == EXEC_REVERSE
16c381f0 6704 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
388a8562
MS
6705 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6706 || (ecs->stop_func_start == 0
6707 && in_solib_dynsym_resolve_code (stop_pc))))
6708 {
6709 /* Any solib trampoline code can be handled in reverse
6710 by simply continuing to single-step. We have already
6711 executed the solib function (backwards), and a few
6712 steps will take us back through the trampoline to the
6713 caller. */
6714 keep_going (ecs);
6715 return;
6716 }
6717
16c381f0 6718 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
8567c30f 6719 {
b2175913
MS
6720 /* We're doing a "next".
6721
6722 Normal (forward) execution: set a breakpoint at the
6723 callee's return address (the address at which the caller
6724 will resume).
6725
6726 Reverse (backward) execution. set the step-resume
6727 breakpoint at the start of the function that we just
6728 stepped into (backwards), and continue to there. When we
6130d0b7 6729 get there, we'll need to single-step back to the caller. */
b2175913
MS
6730
6731 if (execution_direction == EXEC_REVERSE)
6732 {
acf9414f
JK
6733 /* If we're already at the start of the function, we've either
6734 just stepped backward into a single instruction function,
6735 or stepped back out of a signal handler to the first instruction
6736 of the function. Just keep going, which will single-step back
6737 to the caller. */
58c48e72 6738 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
acf9414f 6739 {
acf9414f 6740 /* Normal function call return (static or dynamic). */
51abb421 6741 symtab_and_line sr_sal;
acf9414f
JK
6742 sr_sal.pc = ecs->stop_func_start;
6743 sr_sal.pspace = get_frame_program_space (frame);
6744 insert_step_resume_breakpoint_at_sal (gdbarch,
6745 sr_sal, null_frame_id);
6746 }
b2175913
MS
6747 }
6748 else
568d6575 6749 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6750
8567c30f
AC
6751 keep_going (ecs);
6752 return;
6753 }
a53c66de 6754
95918acb 6755 /* If we are in a function call trampoline (a stub between the
dda83cd7
SM
6756 calling routine and the real function), locate the real
6757 function. That's what tells us (a) whether we want to step
6758 into it at all, and (b) what prologue we want to run to the
6759 end of, if we do step into it. */
568d6575 6760 real_stop_pc = skip_language_trampoline (frame, stop_pc);
95918acb 6761 if (real_stop_pc == 0)
568d6575 6762 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
95918acb
AC
6763 if (real_stop_pc != 0)
6764 ecs->stop_func_start = real_stop_pc;
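      /* If the trampoline target is itself inside the dynamic loader's
	 symbol resolution code, set a step-resume breakpoint at it and
	 continue until we get there.  */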
8fb3e588 6765
db5f024e 6766 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
1b2bfbb9 6767 {
51abb421 6768 symtab_and_line sr_sal;
1b2bfbb9 6769 sr_sal.pc = ecs->stop_func_start;
6c95b8df 6770 sr_sal.pspace = get_frame_program_space (frame);
1b2bfbb9 6771
a6d9a66e
UW
6772 insert_step_resume_breakpoint_at_sal (gdbarch,
6773 sr_sal, null_frame_id);
8fb3e588
AC
6774 keep_going (ecs);
6775 return;
1b2bfbb9
RC
6776 }
6777
95918acb 6778 /* If we have line number information for the function we are
1bfeeb0f
JL
6779 thinking of stepping into and the function isn't on the skip
6780 list, step into it.
95918acb 6781
dda83cd7
SM
6782 If there are several symtabs at that PC (e.g. with include
6783 files), just want to know whether *any* of them have line
6784 numbers. find_pc_line handles this. */
95918acb
AC
6785 {
6786 struct symtab_and_line tmp_sal;
8fb3e588 6787
95918acb 6788 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
2b914b52 6789 if (tmp_sal.line != 0
85817405 6790 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4a4c04f1
BE
6791 tmp_sal)
6792 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
95918acb 6793 {
b2175913 6794 if (execution_direction == EXEC_REVERSE)
568d6575 6795 handle_step_into_function_backward (gdbarch, ecs);
b2175913 6796 else
568d6575 6797 handle_step_into_function (gdbarch, ecs);
95918acb
AC
6798 return;
6799 }
6800 }
6801
6802 /* If we have no line number and the step-stop-if-no-debug is
dda83cd7
SM
6803 set, we stop the step so that the user has a chance to switch
6804 in assembly mode. */
16c381f0 6805 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
078130d0 6806 && step_stop_if_no_debug)
95918acb 6807 {
bdc36728 6808 end_stepping_range (ecs);
95918acb
AC
6809 return;
6810 }
6811
b2175913
MS
6812 if (execution_direction == EXEC_REVERSE)
6813 {
acf9414f
JK
6814 /* If we're already at the start of the function, we've either just
6815 stepped backward into a single instruction function without line
6816 number info, or stepped back out of a signal handler to the first
6817 instruction of the function without line number info. Just keep
6818 going, which will single-step back to the caller. */
6819 if (ecs->stop_func_start != stop_pc)
6820 {
6821 /* Set a breakpoint at callee's start address.
6822 From there we can step once and be back in the caller. */
51abb421 6823 symtab_and_line sr_sal;
acf9414f
JK
6824 sr_sal.pc = ecs->stop_func_start;
6825 sr_sal.pspace = get_frame_program_space (frame);
6826 insert_step_resume_breakpoint_at_sal (gdbarch,
6827 sr_sal, null_frame_id);
6828 }
b2175913
MS
6829 }
6830 else
6831 /* Set a breakpoint at callee's return address (the address
6832 at which the caller will resume). */
568d6575 6833 insert_step_resume_breakpoint_at_caller (frame);
b2175913 6834
95918acb 6835 keep_going (ecs);
488f131b 6836 return;
488f131b 6837 }
c906108c 6838
fdd654f3
MS
6839 /* Reverse stepping through solib trampolines. */
6840
6841 if (execution_direction == EXEC_REVERSE
16c381f0 6842 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
fdd654f3 6843 {
f2ffa92b
PA
6844 CORE_ADDR stop_pc = ecs->event_thread->suspend.stop_pc;
6845
fdd654f3
MS
6846 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
6847 || (ecs->stop_func_start == 0
6848 && in_solib_dynsym_resolve_code (stop_pc)))
6849 {
6850 /* Any solib trampoline code can be handled in reverse
6851 by simply continuing to single-step. We have already
6852 executed the solib function (backwards), and a few
6853 steps will take us back through the trampoline to the
6854 caller. */
6855 keep_going (ecs);
6856 return;
6857 }
6858 else if (in_solib_dynsym_resolve_code (stop_pc))
6859 {
6860 /* Stepped backward into the solib dynsym resolver.
6861 Set a breakpoint at its start and continue, then
6862 one more step will take us out. */
51abb421 6863 symtab_and_line sr_sal;
fdd654f3 6864 sr_sal.pc = ecs->stop_func_start;
9d1807c3 6865 sr_sal.pspace = get_frame_program_space (frame);
fdd654f3
MS
6866 insert_step_resume_breakpoint_at_sal (gdbarch,
6867 sr_sal, null_frame_id);
6868 keep_going (ecs);
6869 return;
6870 }
6871 }
6872
8c95582d
AB
6873 /* This always returns the sal for the inner-most frame when we are in a
6874 stack of inlined frames, even if GDB actually believes that it is in a
6875 more outer frame. This is checked for below by calls to
6876 inline_skipped_frames. */
f2ffa92b 6877 stop_pc_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
7ed0fe66 6878
1b2bfbb9
RC
6879 /* NOTE: tausq/2004-05-24: This if block used to be done before all
6880 the trampoline processing logic; however, there are some trampolines
6881 that have no names, so we should do trampoline handling first. */
16c381f0 6882 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7ed0fe66 6883 && ecs->stop_func_name == NULL
2afb61aa 6884 && stop_pc_sal.line == 0)
1b2bfbb9 6885 {
1eb8556f 6886 infrun_debug_printf ("stepped into undebuggable function");
527159b7 6887
1b2bfbb9 6888 /* The inferior just stepped into, or returned to, an
dda83cd7
SM
6889 undebuggable function (where there is no debugging information
6890 and no line number corresponding to the address where the
6891 inferior stopped). Since we want to skip this kind of code,
6892 we keep going until the inferior returns from this
6893 function - unless the user has asked us not to (via
6894 set step-mode) or we no longer know how to get back
6895 to the call site. */
14e60db5 6896 if (step_stop_if_no_debug
c7ce8faa 6897 || !frame_id_p (frame_unwind_caller_id (frame)))
1b2bfbb9
RC
6898 {
6899 /* If we have no line number and the step-stop-if-no-debug
6900 is set, we stop the step so that the user has a chance to
6901 switch in assembly mode. */
bdc36728 6902 end_stepping_range (ecs);
1b2bfbb9
RC
6903 return;
6904 }
6905 else
6906 {
6907 /* Set a breakpoint at callee's return address (the address
6908 at which the caller will resume). */
568d6575 6909 insert_step_resume_breakpoint_at_caller (frame);
1b2bfbb9
RC
6910 keep_going (ecs);
6911 return;
6912 }
6913 }
6914
16c381f0 6915 if (ecs->event_thread->control.step_range_end == 1)
1b2bfbb9
RC
6916 {
6917 /* It is stepi or nexti. We always want to stop stepping after
dda83cd7 6918 one instruction. */
1eb8556f 6919 infrun_debug_printf ("stepi/nexti");
bdc36728 6920 end_stepping_range (ecs);
1b2bfbb9
RC
6921 return;
6922 }
6923
2afb61aa 6924 if (stop_pc_sal.line == 0)
488f131b
JB
6925 {
6926 /* We have no line number information. That means to stop
dda83cd7
SM
6927 stepping (does this always happen right after one instruction,
6928 when we do "s" in a function with no line numbers,
6929 or can this happen as a result of a return or longjmp?). */
1eb8556f 6930 infrun_debug_printf ("no line number info");
bdc36728 6931 end_stepping_range (ecs);
488f131b
JB
6932 return;
6933 }
c906108c 6934
edb3359d
DJ
6935 /* Look for "calls" to inlined functions, part one. If the inline
6936 frame machinery detected some skipped call sites, we have entered
6937 a new inline function. */
6938
6939 if (frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 6940 ecs->event_thread->control.step_frame_id)
00431a78 6941 && inline_skipped_frames (ecs->event_thread))
edb3359d 6942 {
1eb8556f 6943 infrun_debug_printf ("stepped into inlined function");
edb3359d 6944
51abb421 6945 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
edb3359d 6946
16c381f0 6947 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
edb3359d
DJ
6948 {
6949 /* For "step", we're going to stop. But if the call site
6950 for this inlined function is on the same source line as
6951 we were previously stepping, go down into the function
6952 first. Otherwise stop at the call site. */
6953
6954 if (call_sal.line == ecs->event_thread->current_line
6955 && call_sal.symtab == ecs->event_thread->current_symtab)
4a4c04f1
BE
6956 {
6957 step_into_inline_frame (ecs->event_thread);
6958 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
6959 {
6960 keep_going (ecs);
6961 return;
6962 }
6963 }
edb3359d 6964
bdc36728 6965 end_stepping_range (ecs);
edb3359d
DJ
6966 return;
6967 }
6968 else
6969 {
6970 /* For "next", we should stop at the call site if it is on a
6971 different source line. Otherwise continue through the
6972 inlined function. */
6973 if (call_sal.line == ecs->event_thread->current_line
6974 && call_sal.symtab == ecs->event_thread->current_symtab)
6975 keep_going (ecs);
6976 else
bdc36728 6977 end_stepping_range (ecs);
edb3359d
DJ
6978 return;
6979 }
6980 }
6981
6982 /* Look for "calls" to inlined functions, part two. If we are still
6983 in the same real function we were stepping through, but we have
6984 to go further up to find the exact frame ID, we are stepping
6985 through a more inlined call beyond its call site. */
6986
6987 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
6988 && !frame_id_eq (get_frame_id (get_current_frame ()),
16c381f0 6989 ecs->event_thread->control.step_frame_id)
edb3359d 6990 && stepped_in_from (get_current_frame (),
16c381f0 6991 ecs->event_thread->control.step_frame_id))
edb3359d 6992 {
1eb8556f 6993 infrun_debug_printf ("stepping through inlined function");
edb3359d 6994
4a4c04f1
BE
6995 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
6996 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
edb3359d
DJ
6997 keep_going (ecs);
6998 else
bdc36728 6999 end_stepping_range (ecs);
edb3359d
DJ
7000 return;
7001 }
7002
8c95582d 7003 bool refresh_step_info = true;
f2ffa92b 7004 if ((ecs->event_thread->suspend.stop_pc == stop_pc_sal.pc)
4e1c45ea
PA
7005 && (ecs->event_thread->current_line != stop_pc_sal.line
7006 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
488f131b 7007 {
8c95582d
AB
7008 if (stop_pc_sal.is_stmt)
7009 {
7010 /* We are at the start of a different line. So stop. Note that
7011 we don't stop if we step into the middle of a different line.
7012 That is said to make things like for (;;) statements work
7013 better. */
1eb8556f 7014 infrun_debug_printf ("stepped to a different line");
8c95582d
AB
7015 end_stepping_range (ecs);
7016 return;
7017 }
7018 else if (frame_id_eq (get_frame_id (get_current_frame ()),
7019 ecs->event_thread->control.step_frame_id))
7020 {
7021 /* We are at the start of a different line; however, this line is
7022 not marked as a statement, and we have not changed frame. We
7023 ignore this line table entry, and continue stepping forward,
7024 looking for a better place to stop. */
7025 refresh_step_info = false;
1eb8556f
SM
7026 infrun_debug_printf ("stepped to a different line, but "
7027 "it's not the start of a statement");
8c95582d 7028 }
488f131b 7029 }
c906108c 7030
488f131b 7031 /* We aren't done stepping.
c906108c 7032
488f131b
JB
7033 Optimize by setting the stepping range to the line.
7034 (We might not be in the original line, but if we entered a
7035 new line in mid-statement, we continue stepping. This makes
8c95582d
AB
7036 things like for(;;) statements work better.)
7037
7038 If we entered a SAL that indicates a non-statement line table entry,
7039 then we update the stepping range, but we don't update the step info,
7040 which includes things like the line number we are stepping away from.
7041 This means we will stop when we find a line table entry that is marked
7042 as is-statement, even if it matches the non-statement one we just
7043 stepped into. */
c906108c 7044
16c381f0
JK
7045 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7046 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
c1e36e3e 7047 ecs->event_thread->control.may_range_step = 1;
8c95582d
AB
7048 if (refresh_step_info)
7049 set_step_info (ecs->event_thread, frame, stop_pc_sal);
488f131b 7050
1eb8556f 7051 infrun_debug_printf ("keep going");
488f131b 7052 keep_going (ecs);
104c1213
JM
7053}
7054
c447ac0b
PA
7055/* In all-stop mode, if we're currently stepping but have stopped in
7056 some other thread, we may need to switch back to the stepped
7057 thread. Returns true if we set the inferior running, false if we left
7058 it stopped (and the event needs further processing). */
7059
c4464ade 7060static bool
c447ac0b
PA
7061switch_back_to_stepped_thread (struct execution_control_state *ecs)
7062{
fbea99ea 7063 if (!target_is_non_stop_p ())
c447ac0b 7064 {
99619bea
PA
7065 struct thread_info *stepping_thread;
7066
7067 /* If any thread is blocked on some internal breakpoint, and we
7068 simply need to step over that breakpoint to get it going
7069 again, do that first. */
7070
7071 /* However, if we see an event for the stepping thread, then we
7072 know all other threads have been moved past their breakpoints
7073 already. Let the caller check whether the step is finished,
7074 etc., before deciding to move it past a breakpoint. */
7075 if (ecs->event_thread->control.step_range_end != 0)
c4464ade 7076 return false;
99619bea
PA
7077
7078 /* Check if the current thread is blocked on an incomplete
7079 step-over, interrupted by a random signal. */
7080 if (ecs->event_thread->control.trap_expected
7081 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
c447ac0b 7082 {
1eb8556f
SM
7083 infrun_debug_printf
7084 ("need to finish step-over of [%s]",
7085 target_pid_to_str (ecs->event_thread->ptid).c_str ());
99619bea 7086 keep_going (ecs);
c4464ade 7087 return true;
99619bea 7088 }
2adfaa28 7089
99619bea
PA
7090 /* Check if the current thread is blocked by a single-step
7091 breakpoint of another thread. */
7092 if (ecs->hit_singlestep_breakpoint)
7093 {
1eb8556f
SM
7094 infrun_debug_printf ("need to step [%s] over single-step breakpoint",
7095 target_pid_to_str (ecs->ptid).c_str ());
99619bea 7096 keep_going (ecs);
c4464ade 7097 return true;
99619bea
PA
7098 }
7099
4d9d9d04
PA
7100 /* If this thread needs yet another step-over (e.g., stepping
7101 through a delay slot), do it first before moving on to
7102 another thread. */
7103 if (thread_still_needs_step_over (ecs->event_thread))
7104 {
1eb8556f
SM
7105 infrun_debug_printf
7106 ("thread [%s] still needs step-over",
7107 target_pid_to_str (ecs->event_thread->ptid).c_str ());
4d9d9d04 7108 keep_going (ecs);
c4464ade 7109 return true;
4d9d9d04 7110 }
70509625 7111
483805cf
PA
7112 /* If scheduler locking applies even if not stepping, there's no
7113 need to walk over threads. Above we've checked whether the
7114 current thread is stepping. If some other thread not the
7115 event thread is stepping, then it must be that scheduler
7116 locking is not in effect. */
856e7dd6 7117 if (schedlock_applies (ecs->event_thread))
c4464ade 7118 return false;
483805cf 7119
4d9d9d04
PA
7120 /* Otherwise, we no longer expect a trap in the current thread.
7121 Clear the trap_expected flag before switching back -- this is
7122 what keep_going does as well, if we call it. */
7123 ecs->event_thread->control.trap_expected = 0;
7124
7125 /* Likewise, clear the signal if it should not be passed. */
7126 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7127 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7128
7129 /* Do all pending step-overs before actually proceeding with
483805cf 7130 step/next/etc. */
4d9d9d04
PA
7131 if (start_step_over ())
7132 {
7133 prepare_to_wait (ecs);
c4464ade 7134 return true;
4d9d9d04
PA
7135 }
7136
7137 /* Look for the stepping/nexting thread. */
483805cf 7138 stepping_thread = NULL;
4d9d9d04 7139
08036331 7140 for (thread_info *tp : all_non_exited_threads ())
dda83cd7 7141 {
f3f8ece4
PA
7142 switch_to_thread_no_regs (tp);
7143
fbea99ea
PA
7144 /* Ignore threads of processes the caller is not
7145 resuming. */
483805cf 7146 if (!sched_multi
5b6d1e4f
PA
7147 && (tp->inf->process_target () != ecs->target
7148 || tp->inf->pid != ecs->ptid.pid ()))
483805cf
PA
7149 continue;
7150
7151 /* When stepping over a breakpoint, we lock all threads
7152 except the one that needs to move past the breakpoint.
7153 If a non-event thread has this set, the "incomplete
7154 step-over" check above should have caught it earlier. */
372316f1
PA
7155 if (tp->control.trap_expected)
7156 {
7157 internal_error (__FILE__, __LINE__,
7158 "[%s] has inconsistent state: "
7159 "trap_expected=%d\n",
a068643d 7160 target_pid_to_str (tp->ptid).c_str (),
372316f1
PA
7161 tp->control.trap_expected);
7162 }
483805cf
PA
7163
7164 /* Did we find the stepping thread? */
7165 if (tp->control.step_range_end)
7166 {
7167 /* Yep. There should only be one though. */
7168 gdb_assert (stepping_thread == NULL);
7169
7170 /* The event thread is handled at the top, before we
7171 enter this loop. */
7172 gdb_assert (tp != ecs->event_thread);
7173
7174 /* If some thread other than the event thread is
7175 stepping, then scheduler locking can't be in effect,
7176 otherwise we wouldn't have resumed the current event
7177 thread in the first place. */
856e7dd6 7178 gdb_assert (!schedlock_applies (tp));
483805cf
PA
7179
7180 stepping_thread = tp;
7181 }
99619bea
PA
7182 }
7183
483805cf 7184 if (stepping_thread != NULL)
99619bea 7185 {
1eb8556f 7186 infrun_debug_printf ("switching back to stepped thread");
c447ac0b 7187
2ac7589c
PA
7188 if (keep_going_stepped_thread (stepping_thread))
7189 {
7190 prepare_to_wait (ecs);
c4464ade 7191 return true;
2ac7589c
PA
7192 }
7193 }
f3f8ece4
PA
7194
7195 switch_to_thread (ecs->event_thread);
2ac7589c 7196 }
2adfaa28 7197
c4464ade 7198 return false;
2ac7589c 7199}
2adfaa28 7200
2ac7589c
PA
7201/* Set a previously stepped thread back to stepping. Returns true on
7202 success, false if the resume is not possible (e.g., the thread
7203 vanished). */
7204
c4464ade 7205static bool
2ac7589c
PA
7206keep_going_stepped_thread (struct thread_info *tp)
7207{
7208 struct frame_info *frame;
2ac7589c
PA
7209 struct execution_control_state ecss;
7210 struct execution_control_state *ecs = &ecss;
2adfaa28 7211
2ac7589c
PA
7212 /* If the stepping thread exited, then don't try to switch back and
7213 resume it, which could fail in several different ways depending
7214 on the target. Instead, just keep going.
2adfaa28 7215
2ac7589c
PA
7216 We can find a stepping dead thread in the thread list in two
7217 cases:
2adfaa28 7218
2ac7589c
PA
7219 - The target supports thread exit events, and when the target
7220 tries to delete the thread from the thread list, inferior_ptid
7221 pointed at the exiting thread. In such case, calling
7222 delete_thread does not really remove the thread from the list;
7223 instead, the thread is left listed, with 'exited' state.
64ce06e4 7224
2ac7589c
PA
7225 - The target's debug interface does not support thread exit
7226 events, and so we have no idea whatsoever if the previously
7227 stepping thread is still alive. For that reason, we need to
7228 synchronously query the target now. */
2adfaa28 7229
00431a78 7230 if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
2ac7589c 7231 {
1eb8556f
SM
7232 infrun_debug_printf ("not resuming previously stepped thread, it has "
7233 "vanished");
2ac7589c 7234
00431a78 7235 delete_thread (tp);
c4464ade 7236 return false;
c447ac0b 7237 }
2ac7589c 7238
1eb8556f 7239 infrun_debug_printf ("resuming previously stepped thread");
2ac7589c
PA
7240
7241 reset_ecs (ecs, tp);
00431a78 7242 switch_to_thread (tp);
2ac7589c 7243
f2ffa92b 7244 tp->suspend.stop_pc = regcache_read_pc (get_thread_regcache (tp));
2ac7589c 7245 frame = get_current_frame ();
2ac7589c
PA
7246
7247 /* If the PC of the thread we were trying to single-step has
7248 changed, then that thread has trapped or been signaled, but the
7249 event has not been reported to GDB yet. Re-poll the target
7250 looking for this particular thread's event (i.e. temporarily
7251 enable schedlock) by:
7252
7253 - setting a break at the current PC
7254 - resuming that particular thread, only (by setting trap
7255 expected)
7256
7257 This prevents us from continuously moving the single-step breakpoint
7258 forward, one instruction at a time, overstepping. */
7259
f2ffa92b 7260 if (tp->suspend.stop_pc != tp->prev_pc)
2ac7589c
PA
7261 {
7262 ptid_t resume_ptid;
7263
1eb8556f
SM
7264 infrun_debug_printf ("expected thread advanced also (%s -> %s)",
7265 paddress (target_gdbarch (), tp->prev_pc),
7266 paddress (target_gdbarch (), tp->suspend.stop_pc));
2ac7589c
PA
7267
7268 /* Clear the info of the previous step-over, as it's no longer
7269 valid (if the thread was trying to step over a breakpoint, it
7270 has already succeeded). It's what keep_going would do too,
7271 if we called it. Do this before trying to insert the sss
7272 breakpoint, otherwise if we were previously trying to step
7273 over this exact address in another thread, the breakpoint is
7274 skipped. */
7275 clear_step_over_info ();
7276 tp->control.trap_expected = 0;
7277
7278 insert_single_step_breakpoint (get_frame_arch (frame),
7279 get_frame_address_space (frame),
f2ffa92b 7280 tp->suspend.stop_pc);
2ac7589c 7281
719546c4 7282 tp->resumed = true;
fbea99ea 7283 resume_ptid = internal_resume_ptid (tp->control.stepping_command);
c4464ade 7284 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
2ac7589c
PA
7285 }
7286 else
7287 {
1eb8556f 7288 infrun_debug_printf ("expected thread still hasn't advanced");
2ac7589c
PA
7289
7290 keep_going_pass_signal (ecs);
7291 }
c4464ade
SM
7292
7293 return true;
c447ac0b
PA
7294}
7295
8b061563
PA
7296/* Is thread TP in the middle of (software or hardware)
7297 single-stepping? (Note the result of this function must never be
7298 passed directly as target_resume's STEP parameter.) */
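/* A thread counts as stepping here if it has an active step range with
   no step-resume breakpoint pending, is expected to trap while stepping
   over a breakpoint (trap_expected), has stepped_breakpoint set, or if
   bpstat_should_step () reports that single-stepping is needed (e.g.,
   for software watchpoints).  */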
104c1213 7299
c4464ade 7300static bool
b3444185 7301currently_stepping (struct thread_info *tp)
a7212384 7302{
8358c15c
JK
7303 return ((tp->control.step_range_end
7304 && tp->control.step_resume_breakpoint == NULL)
7305 || tp->control.trap_expected
af48d08f 7306 || tp->stepped_breakpoint
8358c15c 7307 || bpstat_should_step ());
a7212384
UW
7308}
7309
b2175913
MS
7310/* Inferior has stepped into a subroutine call with source code that
7311 we should not step over. Do step to the first line of code in
7312 it. */
c2c6d25f
JM
7313
7314static void
568d6575
UW
7315handle_step_into_function (struct gdbarch *gdbarch,
7316 struct execution_control_state *ecs)
c2c6d25f 7317{
7e324e48
GB
7318 fill_in_stop_func (gdbarch, ecs);
7319
f2ffa92b
PA
7320 compunit_symtab *cust
7321 = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7322 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7323 ecs->stop_func_start
7324 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
c2c6d25f 7325
51abb421 7326 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
c2c6d25f
JM
7327 /* Use the step_resume_break to step until the end of the prologue,
7328 even if that involves jumps (as it seems to on the vax under
7329 4.2). */
7330 /* If the prologue ends in the middle of a source line, continue to
7331 the end of that source line (if it is still within the function).
7332 Otherwise, just go to end of prologue. */
2afb61aa
PA
7333 if (stop_func_sal.end
7334 && stop_func_sal.pc != ecs->stop_func_start
7335 && stop_func_sal.end < ecs->stop_func_end)
7336 ecs->stop_func_start = stop_func_sal.end;
c2c6d25f 7337
2dbd5e30
KB
7338 /* Architectures which require breakpoint adjustment might not be able
7339 to place a breakpoint at the computed address. If so, the test
7340 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
7341 ecs->stop_func_start to an address at which a breakpoint may be
7342 legitimately placed.
8fb3e588 7343
2dbd5e30
KB
7344 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
7345 made, GDB will enter an infinite loop when stepping through
7346 optimized code consisting of VLIW instructions which contain
7347 subinstructions corresponding to different source lines. On
7348 FR-V, it's not permitted to place a breakpoint on any but the
7349 first subinstruction of a VLIW instruction. When a breakpoint is
7350 set, GDB will adjust the breakpoint address to the beginning of
7351 the VLIW instruction. Thus, we need to make the corresponding
7352 adjustment here when computing the stop address. */
8fb3e588 7353
568d6575 7354 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
2dbd5e30
KB
7355 {
7356 ecs->stop_func_start
568d6575 7357 = gdbarch_adjust_breakpoint_address (gdbarch,
8fb3e588 7358 ecs->stop_func_start);
2dbd5e30
KB
7359 }
7360
f2ffa92b 7361 if (ecs->stop_func_start == ecs->event_thread->suspend.stop_pc)
c2c6d25f
JM
7362 {
7363 /* We are already there: stop now. */
bdc36728 7364 end_stepping_range (ecs);
c2c6d25f
JM
7365 return;
7366 }
7367 else
7368 {
7369 /* Put the step-breakpoint there and go until there. */
51abb421 7370 symtab_and_line sr_sal;
c2c6d25f
JM
7371 sr_sal.pc = ecs->stop_func_start;
7372 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
6c95b8df 7373 sr_sal.pspace = get_frame_program_space (get_current_frame ());
44cbf7b5 7374
c2c6d25f 7375 /* Do not specify what the fp should be when we stop since on
dda83cd7
SM
7376 some machines the prologue is where the new fp value is
7377 established. */
a6d9a66e 7378 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
c2c6d25f
JM
7379
7380 /* And make sure stepping stops right away then. */
16c381f0 7381 ecs->event_thread->control.step_range_end
dda83cd7 7382 = ecs->event_thread->control.step_range_start;
c2c6d25f
JM
7383 }
7384 keep_going (ecs);
7385}
d4f3574e 7386
b2175913
MS
7387/* Inferior has stepped backward into a subroutine call with source
7388 code that we should not step over. Step to the beginning of the
7389 last line of code in it. */
7390
7391static void
568d6575
UW
7392handle_step_into_function_backward (struct gdbarch *gdbarch,
7393 struct execution_control_state *ecs)
b2175913 7394{
43f3e411 7395 struct compunit_symtab *cust;
167e4384 7396 struct symtab_and_line stop_func_sal;
b2175913 7397
7e324e48
GB
7398 fill_in_stop_func (gdbarch, ecs);
7399
f2ffa92b 7400 cust = find_pc_compunit_symtab (ecs->event_thread->suspend.stop_pc);
43f3e411 7401 if (cust != NULL && compunit_language (cust) != language_asm)
46a62268
YQ
7402 ecs->stop_func_start
7403 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
b2175913 7404
f2ffa92b 7405 stop_func_sal = find_pc_line (ecs->event_thread->suspend.stop_pc, 0);
b2175913
MS
7406
7407 /* OK, we're just going to keep stepping here. */
f2ffa92b 7408 if (stop_func_sal.pc == ecs->event_thread->suspend.stop_pc)
b2175913
MS
7409 {
7410 /* We're there already. Just stop stepping now. */
bdc36728 7411 end_stepping_range (ecs);
b2175913
MS
7412 }
7413 else
7414 {
7415 /* Else just reset the step range and keep going.
7416 No step-resume breakpoint, they don't work for
7417 epilogues, which can have multiple entry paths. */
16c381f0
JK
7418 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
7419 ecs->event_thread->control.step_range_end = stop_func_sal.end;
b2175913
MS
7420 keep_going (ecs);
7421 }
7422 return;
7423}
7424
d3169d93 7425/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
44cbf7b5
AC
7426 This is used both to step over functions and to skip over code. */
7427
7428static void
2c03e5be
PA
7429insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
7430 struct symtab_and_line sr_sal,
7431 struct frame_id sr_id,
7432 enum bptype sr_type)
44cbf7b5 7433{
611c83ae
PA
7434 /* There should never be more than one step-resume or longjmp-resume
7435 breakpoint per thread, so we should never be setting a new
44cbf7b5 7436 step_resume_breakpoint when one is already active. */
8358c15c 7437 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
2c03e5be 7438 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
d3169d93 7439
1eb8556f
SM
7440 infrun_debug_printf ("inserting step-resume breakpoint at %s",
7441 paddress (gdbarch, sr_sal.pc));
d3169d93 7442
8358c15c 7443 inferior_thread ()->control.step_resume_breakpoint
454dafbd 7444 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
2c03e5be
PA
7445}
7446
9da8c2a0 7447void
2c03e5be
PA
7448insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
7449 struct symtab_and_line sr_sal,
7450 struct frame_id sr_id)
7451{
7452 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
7453 sr_sal, sr_id,
7454 bp_step_resume);
44cbf7b5 7455}
7ce450bd 7456
2c03e5be
PA
7457/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
7458 This is used to skip a potential signal handler.
7ce450bd 7459
14e60db5
DJ
7460 This is called with the interrupted function's frame. The signal
7461 handler, when it returns, will resume the interrupted function at
7462 RETURN_FRAME.pc. */
d303a6c7
AC
7463
7464static void
2c03e5be 7465insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
d303a6c7 7466{
f4c1edd8 7467 gdb_assert (return_frame != NULL);
d303a6c7 7468
51abb421
PA
7469 struct gdbarch *gdbarch = get_frame_arch (return_frame);
7470
7471 symtab_and_line sr_sal;
568d6575 7472 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
d303a6c7 7473 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7474 sr_sal.pspace = get_frame_program_space (return_frame);
d303a6c7 7475
2c03e5be
PA
7476 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
7477 get_stack_frame_id (return_frame),
7478 bp_hp_step_resume);
d303a6c7
AC
7479}
7480
2c03e5be
PA
7481/* Insert a "step-resume breakpoint" at the previous frame's PC. This
7482 is used to skip a function after stepping into it (for "next" or if
7483 the called function has no debugging information).
14e60db5
DJ
7484
7485 The current function has almost always been reached by single
7486 stepping a call or return instruction. NEXT_FRAME belongs to the
7487 current function, and the breakpoint will be set at the caller's
7488 resume address.
7489
7490 This is a separate function rather than reusing
2c03e5be 7491 insert_hp_step_resume_breakpoint_at_frame in order to avoid
14e60db5 7492 get_prev_frame, which may stop prematurely (see the implementation
c7ce8faa 7493 of frame_unwind_caller_id for an example). */
14e60db5
DJ
7494
7495static void
7496insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
7497{
14e60db5
DJ
7498 /* We shouldn't have gotten here if we don't know where the call site
7499 is. */
c7ce8faa 7500 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
14e60db5 7501
51abb421 7502 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
14e60db5 7503
51abb421 7504 symtab_and_line sr_sal;
c7ce8faa
DJ
7505 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
7506 frame_unwind_caller_pc (next_frame));
14e60db5 7507 sr_sal.section = find_pc_overlay (sr_sal.pc);
6c95b8df 7508 sr_sal.pspace = frame_unwind_program_space (next_frame);
14e60db5 7509
a6d9a66e 7510 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
c7ce8faa 7511 frame_unwind_caller_id (next_frame));
14e60db5
DJ
7512}
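/* Usage illustration (example session; the callee name is illustrative):
   the function above is the mechanism behind

     (gdb) next
     ... the single-step lands inside printf, which has no debug info ...

   where GDB plants a step-resume breakpoint at the caller's resume
   address and lets the inferior run freely until the call returns,
   rather than single-stepping through the whole callee.  */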
7513
611c83ae
PA
7514/* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
7515 new breakpoint at the target of a jmp_buf. The handling of
7516 longjmp-resume uses the same mechanisms used for handling
7517 "step-resume" breakpoints. */
7518
7519static void
a6d9a66e 7520insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
611c83ae 7521{
e81a37f7
TT
7522 /* There should never be more than one longjmp-resume breakpoint per
7523 thread, so we should never be setting a new
611c83ae 7524 longjmp_resume_breakpoint when one is already active. */
e81a37f7 7525 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
611c83ae 7526
1eb8556f
SM
7527 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
7528 paddress (gdbarch, pc));
611c83ae 7529
e81a37f7 7530 inferior_thread ()->control.exception_resume_breakpoint =
454dafbd 7531 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
611c83ae
PA
7532}
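/* Usage note (a sketch of the surrounding flow, which lives in the event
   handling code rather than here): when a stepping thread stops at a
   longjmp breakpoint, GDB asks the architecture for the jmp_buf's target
   PC (gdbarch_get_longjmp_target) and then calls the function above, so
   that the step completes at the longjmp destination instead of being
   lost when the frame is abandoned.  */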
7533
186c406b
TT
7534/* Insert an exception resume breakpoint. TP is the thread throwing
7535 the exception. The block B is the block of the unwinder debug hook
7536 function. FRAME is the frame corresponding to the call to this
7537 function. SYM is the symbol of the function argument holding the
7538 target PC of the exception. */
7539
7540static void
7541insert_exception_resume_breakpoint (struct thread_info *tp,
3977b71f 7542 const struct block *b,
186c406b
TT
7543 struct frame_info *frame,
7544 struct symbol *sym)
7545{
a70b8144 7546 try
186c406b 7547 {
63e43d3a 7548 struct block_symbol vsym;
186c406b
TT
7549 struct value *value;
7550 CORE_ADDR handler;
7551 struct breakpoint *bp;
7552
987012b8 7553 vsym = lookup_symbol_search_name (sym->search_name (),
de63c46b 7554 b, VAR_DOMAIN);
63e43d3a 7555 value = read_var_value (vsym.symbol, vsym.block, frame);
186c406b
TT
7556 /* If the value was optimized out, revert to the old behavior. */
7557 if (! value_optimized_out (value))
7558 {
7559 handler = value_as_address (value);
7560
1eb8556f
SM
7561 infrun_debug_printf ("exception resume at %lx",
7562 (unsigned long) handler);
186c406b
TT
7563
7564 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd
TT
7565 handler,
7566 bp_exception_resume).release ();
c70a6932
JK
7567
7568 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
7569 frame = NULL;
7570
5d5658a1 7571 bp->thread = tp->global_num;
186c406b
TT
7572 inferior_thread ()->control.exception_resume_breakpoint = bp;
7573 }
7574 }
230d2906 7575 catch (const gdb_exception_error &e)
492d29ea
PA
7576 {
7577 /* We want to ignore errors here. */
7578 }
186c406b
TT
7579}
7580
28106bc2
SDJ
7581/* A helper for check_exception_resume that sets an
7582 exception-breakpoint based on a SystemTap probe. */
7583
7584static void
7585insert_exception_resume_from_probe (struct thread_info *tp,
729662a5 7586 const struct bound_probe *probe,
28106bc2
SDJ
7587 struct frame_info *frame)
7588{
7589 struct value *arg_value;
7590 CORE_ADDR handler;
7591 struct breakpoint *bp;
7592
7593 arg_value = probe_safe_evaluate_at_pc (frame, 1);
7594 if (!arg_value)
7595 return;
7596
7597 handler = value_as_address (arg_value);
7598
1eb8556f
SM
7599 infrun_debug_printf ("exception resume at %s",
7600 paddress (probe->objfile->arch (), handler));
28106bc2
SDJ
7601
7602 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
454dafbd 7603 handler, bp_exception_resume).release ();
5d5658a1 7604 bp->thread = tp->global_num;
28106bc2
SDJ
7605 inferior_thread ()->control.exception_resume_breakpoint = bp;
7606}
7607
186c406b
TT
7608/* This is called when an exception has been intercepted. Check to
7609 see whether the exception's destination is of interest, and if so,
7610 set an exception resume breakpoint there. */
7611
7612static void
7613check_exception_resume (struct execution_control_state *ecs,
28106bc2 7614 struct frame_info *frame)
186c406b 7615{
729662a5 7616 struct bound_probe probe;
28106bc2
SDJ
7617 struct symbol *func;
7618
7619 /* First see if this exception unwinding breakpoint was set via a
7620 SystemTap probe point. If so, the probe has two arguments: the
7621 CFA and the HANDLER. We ignore the CFA, extract the handler, and
7622 set a breakpoint there. */
6bac7473 7623 probe = find_probe_by_pc (get_frame_pc (frame));
935676c9 7624 if (probe.prob)
28106bc2 7625 {
729662a5 7626 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
28106bc2
SDJ
7627 return;
7628 }
7629
7630 func = get_frame_function (frame);
7631 if (!func)
7632 return;
186c406b 7633
a70b8144 7634 try
186c406b 7635 {
3977b71f 7636 const struct block *b;
8157b174 7637 struct block_iterator iter;
186c406b
TT
7638 struct symbol *sym;
7639 int argno = 0;
7640
7641 /* The exception breakpoint is a thread-specific breakpoint on
7642 the unwinder's debug hook, declared as:
7643
7644 void _Unwind_DebugHook (void *cfa, void *handler);
7645
7646 The CFA argument indicates the frame to which control is
7647 about to be transferred. HANDLER is the destination PC.
7648
7649 We ignore the CFA and set a temporary breakpoint at HANDLER.
7650 This is not extremely efficient but it avoids issues in gdb
7651 with computing the DWARF CFA, and it also works even in weird
7652 cases such as throwing an exception from inside a signal
7653 handler. */
7654
7655 b = SYMBOL_BLOCK_VALUE (func);
7656 ALL_BLOCK_SYMBOLS (b, iter, sym)
7657 {
7658 if (!SYMBOL_IS_ARGUMENT (sym))
7659 continue;
7660
7661 if (argno == 0)
7662 ++argno;
7663 else
7664 {
7665 insert_exception_resume_breakpoint (ecs->event_thread,
7666 b, frame, sym);
7667 break;
7668 }
7669 }
7670 }
230d2906 7671 catch (const gdb_exception_error &e)
492d29ea
PA
7672 {
7673 }
186c406b
TT
7674}
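/* Illustrative sketch, not part of infrun.c: the runtime half of the
   contract described in check_exception_resume above.  Going only by the
   declaration quoted there, a C++ runtime's unwinder calls an empty,
   never-inlined hook with the CFA and the handler PC right before
   resuming at the landing pad, so a debugger can breakpoint it (or use
   the associated SystemTap probe) and read HANDLER.  The real libgcc
   implementation may differ in detail.  */

void __attribute__ ((noinline))
_Unwind_DebugHook (void *cfa, void *handler)
{
  /* Intentionally empty: this exists only as a place for a debugger to
     plant a breakpoint and inspect the arguments.  */
}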
7675
104c1213 7676static void
22bcd14b 7677stop_waiting (struct execution_control_state *ecs)
104c1213 7678{
1eb8556f 7679 infrun_debug_printf ("stop_waiting");
527159b7 7680
cd0fc7c3
SS
7681 /* Let callers know we don't want to wait for the inferior anymore. */
7682 ecs->wait_some_more = 0;
fbea99ea 7683
53cccef1 7684 /* If all-stop, but there exists a non-stop target, stop all
fbea99ea 7685 threads now that we're presenting the stop to the user. */
53cccef1 7686 if (!non_stop && exists_non_stop_target ())
fbea99ea 7687 stop_all_threads ();
cd0fc7c3
SS
7688}
7689
4d9d9d04
PA
7690/* Like keep_going, but passes the signal to the inferior, even if the
7691 signal is set to nopass. */
d4f3574e
SS
7692
7693static void
4d9d9d04 7694keep_going_pass_signal (struct execution_control_state *ecs)
d4f3574e 7695{
d7e15655 7696 gdb_assert (ecs->event_thread->ptid == inferior_ptid);
372316f1 7697 gdb_assert (!ecs->event_thread->resumed);
4d9d9d04 7698
d4f3574e 7699 /* Save the pc before execution, to compare with pc after stop. */
fb14de7b 7700 ecs->event_thread->prev_pc
fc75c28b 7701 = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));
d4f3574e 7702
4d9d9d04 7703 if (ecs->event_thread->control.trap_expected)
d4f3574e 7704 {
4d9d9d04
PA
7705 struct thread_info *tp = ecs->event_thread;
7706
1eb8556f
SM
7707 infrun_debug_printf ("%s has trap_expected set, "
7708 "resuming to collect trap",
7709 target_pid_to_str (tp->ptid).c_str ());
4d9d9d04 7710
a9ba6bae
PA
7711 /* We haven't yet gotten our trap, and either: intercepted a
7712 non-signal event (e.g., a fork); or took a signal which we
7713 are supposed to pass through to the inferior. Simply
7714 continue. */
64ce06e4 7715 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e 7716 }
372316f1
PA
7717 else if (step_over_info_valid_p ())
7718 {
7719 /* Another thread is stepping over a breakpoint in-line. If
7720 this thread needs a step-over too, queue the request. In
7721 either case, this resume must be deferred for later. */
7722 struct thread_info *tp = ecs->event_thread;
7723
7724 if (ecs->hit_singlestep_breakpoint
7725 || thread_still_needs_step_over (tp))
7726 {
1eb8556f
SM
7727 infrun_debug_printf ("step-over already in progress: "
7728 "step-over for %s deferred",
7729 target_pid_to_str (tp->ptid).c_str ());
28d5518b 7730 global_thread_step_over_chain_enqueue (tp);
372316f1
PA
7731 }
7732 else
7733 {
1eb8556f
SM
7734 infrun_debug_printf ("step-over in progress: resume of %s deferred",
7735 target_pid_to_str (tp->ptid).c_str ());
372316f1 7736 }
372316f1 7737 }
d4f3574e
SS
7738 else
7739 {
31e77af2 7740 struct regcache *regcache = get_current_regcache ();
963f9c80
PA
7741 int remove_bp;
7742 int remove_wps;
8d297bbf 7743 step_over_what step_what;
31e77af2 7744
d4f3574e 7745 /* Either the trap was not expected, but we are continuing
a9ba6bae
PA
7746 anyway (if we got a signal, the user asked it be passed to
7747 the child)
7748 -- or --
7749 We got our expected trap, but decided we should resume from
7750 it.
d4f3574e 7751
a9ba6bae 7752 We're going to run this baby now!
d4f3574e 7753
c36b740a
VP
7754 Note that insert_breakpoints won't try to re-insert
7755 already inserted breakpoints. Therefore, we don't
7756 care if breakpoints were already inserted, or not. */
a9ba6bae 7757
31e77af2
PA
7758 /* If we need to step over a breakpoint, and we're not using
7759 displaced stepping to do so, insert all breakpoints
7760 (watchpoints, etc.) but the one we're stepping over, step one
7761 instruction, and then re-insert the breakpoint when that step
7762 is finished. */
963f9c80 7763
6c4cfb24
PA
7764 step_what = thread_still_needs_step_over (ecs->event_thread);
7765
963f9c80 7766 remove_bp = (ecs->hit_singlestep_breakpoint
6c4cfb24
PA
7767 || (step_what & STEP_OVER_BREAKPOINT));
7768 remove_wps = (step_what & STEP_OVER_WATCHPOINT);
963f9c80 7769
cb71640d
PA
7770 /* We can't use displaced stepping if we need to step past a
7771 watchpoint. The instruction copied to the scratch pad would
7772 still trigger the watchpoint. */
7773 if (remove_bp
3fc8eb30 7774 && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
45e8c884 7775 {
a01bda52 7776 set_step_over_info (regcache->aspace (),
21edc42f
YQ
7777 regcache_read_pc (regcache), remove_wps,
7778 ecs->event_thread->global_num);
45e8c884 7779 }
963f9c80 7780 else if (remove_wps)
21edc42f 7781 set_step_over_info (NULL, 0, remove_wps, -1);
372316f1
PA
7782
7783 /* If we now need to do an in-line step-over, we need to stop
7784 all other threads. Note this must be done before
7785 insert_breakpoints below, because that removes the breakpoint
7786 we're about to step over, otherwise other threads could miss
7787 it. */
fbea99ea 7788 if (step_over_info_valid_p () && target_is_non_stop_p ())
372316f1 7789 stop_all_threads ();
abbb1732 7790
31e77af2 7791 /* Stop stepping if inserting breakpoints fails. */
a70b8144 7792 try
31e77af2
PA
7793 {
7794 insert_breakpoints ();
7795 }
230d2906 7796 catch (const gdb_exception_error &e)
31e77af2
PA
7797 {
7798 exception_print (gdb_stderr, e);
22bcd14b 7799 stop_waiting (ecs);
bdf2a94a 7800 clear_step_over_info ();
31e77af2 7801 return;
d4f3574e
SS
7802 }
7803
963f9c80 7804 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
d4f3574e 7805
64ce06e4 7806 resume (ecs->event_thread->suspend.stop_signal);
d4f3574e
SS
7807 }
7808
488f131b 7809 prepare_to_wait (ecs);
d4f3574e
SS
7810}
7811
4d9d9d04
PA
7812/* Called when we should continue running the inferior, because the
7813 current event doesn't cause a user visible stop. This does the
7814 resuming part; waiting for the next event is done elsewhere. */
7815
7816static void
7817keep_going (struct execution_control_state *ecs)
7818{
7819 if (ecs->event_thread->control.trap_expected
7820 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
7821 ecs->event_thread->control.trap_expected = 0;
7822
7823 if (!signal_program[ecs->event_thread->suspend.stop_signal])
7824 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
7825 keep_going_pass_signal (ecs);
7826}
7827
104c1213
JM
7828/* This function normally comes after a resume, before
7829 handle_inferior_event exits. It takes care of any last bits of
7830 housekeeping, and sets the all-important wait_some_more flag. */
cd0fc7c3 7831
104c1213
JM
7832static void
7833prepare_to_wait (struct execution_control_state *ecs)
cd0fc7c3 7834{
1eb8556f 7835 infrun_debug_printf ("prepare_to_wait");
104c1213 7836
104c1213 7837 ecs->wait_some_more = 1;
0b333c5e 7838
42bd97a6
PA
7839 /* If the target can't async, emulate it by marking the infrun event
7840 handler such that as soon as we get back to the event-loop, we
7841 immediately end up in fetch_inferior_event again calling
7842 target_wait. */
7843 if (!target_can_async_p ())
0b333c5e 7844 mark_infrun_async_event_handler ();
c906108c 7845}
11cf8741 7846
fd664c91 7847/* We are done with the step range of a step/next/si/ni command.
b57bacec 7848 Called once for each n of a "step n" operation. */
fd664c91
PA
7849
7850static void
bdc36728 7851end_stepping_range (struct execution_control_state *ecs)
fd664c91 7852{
bdc36728 7853 ecs->event_thread->control.stop_step = 1;
bdc36728 7854 stop_waiting (ecs);
fd664c91
PA
7855}
7856
33d62d64
JK
7857/* Several print_*_reason functions to print why the inferior has stopped.
7858 We always print something when the inferior exits, or receives a signal.
7859 The rest of the cases are dealt with later on in normal_stop and
7860 print_it_typical. Ideally there should be a call to one of these
7861 print_*_reason functions from handle_inferior_event each time
22bcd14b 7862 stop_waiting is called.
33d62d64 7863
fd664c91
PA
7864 Note that we don't call these directly, instead we delegate that to
7865 the interpreters, through observers. Interpreters then call these
7866 with whatever uiout is right. */
33d62d64 7867
fd664c91
PA
7868void
7869print_end_stepping_range_reason (struct ui_out *uiout)
33d62d64 7870{
fd664c91 7871 /* For CLI-like interpreters, print nothing. */
33d62d64 7872
112e8700 7873 if (uiout->is_mi_like_p ())
fd664c91 7874 {
112e8700 7875 uiout->field_string ("reason",
fd664c91
PA
7876 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
7877 }
7878}
33d62d64 7879
fd664c91
PA
7880void
7881print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
11cf8741 7882{
33d62d64 7883 annotate_signalled ();
112e8700
SM
7884 if (uiout->is_mi_like_p ())
7885 uiout->field_string
7886 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
7887 uiout->text ("\nProgram terminated with signal ");
33d62d64 7888 annotate_signal_name ();
112e8700 7889 uiout->field_string ("signal-name",
2ea28649 7890 gdb_signal_to_name (siggnal));
33d62d64 7891 annotate_signal_name_end ();
112e8700 7892 uiout->text (", ");
33d62d64 7893 annotate_signal_string ();
112e8700 7894 uiout->field_string ("signal-meaning",
2ea28649 7895 gdb_signal_to_string (siggnal));
33d62d64 7896 annotate_signal_string_end ();
112e8700
SM
7897 uiout->text (".\n");
7898 uiout->text ("The program no longer exists.\n");
33d62d64
JK
7899}
7900
fd664c91
PA
7901void
7902print_exited_reason (struct ui_out *uiout, int exitstatus)
33d62d64 7903{
fda326dd 7904 struct inferior *inf = current_inferior ();
a068643d 7905 std::string pidstr = target_pid_to_str (ptid_t (inf->pid));
fda326dd 7906
33d62d64
JK
7907 annotate_exited (exitstatus);
7908 if (exitstatus)
7909 {
112e8700
SM
7910 if (uiout->is_mi_like_p ())
7911 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
6a831f06
PA
7912 std::string exit_code_str
7913 = string_printf ("0%o", (unsigned int) exitstatus);
7914 uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
7915 plongest (inf->num), pidstr.c_str (),
7916 string_field ("exit-code", exit_code_str.c_str ()));
33d62d64
JK
7917 }
7918 else
11cf8741 7919 {
112e8700
SM
7920 if (uiout->is_mi_like_p ())
7921 uiout->field_string
7922 ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6a831f06
PA
7923 uiout->message ("[Inferior %s (%s) exited normally]\n",
7924 plongest (inf->num), pidstr.c_str ());
33d62d64 7925 }
33d62d64
JK
7926}
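/* For illustration only (inferior number and PID are example values):
   with exitstatus == 1 the code above prints

     [Inferior 1 (process 1234) exited with code 01]

   on the CLI (the exit code is formatted in octal by the "0%o" above)
   and emits an MI "exited" reason with exit-code="01"; with
   exitstatus == 0 it prints "[Inferior 1 (process 1234) exited normally]"
   and the MI reason "exited-normally".  */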
7927
fd664c91
PA
7928void
7929print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
33d62d64 7930{
f303dbd6
PA
7931 struct thread_info *thr = inferior_thread ();
7932
33d62d64
JK
7933 annotate_signal ();
7934
112e8700 7935 if (uiout->is_mi_like_p ())
f303dbd6
PA
7936 ;
7937 else if (show_thread_that_caused_stop ())
33d62d64 7938 {
f303dbd6 7939 const char *name;
33d62d64 7940
112e8700 7941 uiout->text ("\nThread ");
33eca680 7942 uiout->field_string ("thread-id", print_thread_id (thr));
f303dbd6
PA
7943
7944 name = thr->name != NULL ? thr->name : target_thread_name (thr);
7945 if (name != NULL)
7946 {
112e8700 7947 uiout->text (" \"");
33eca680 7948 uiout->field_string ("name", name);
112e8700 7949 uiout->text ("\"");
f303dbd6 7950 }
33d62d64 7951 }
f303dbd6 7952 else
112e8700 7953 uiout->text ("\nProgram");
f303dbd6 7954
112e8700
SM
7955 if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
7956 uiout->text (" stopped");
33d62d64
JK
7957 else
7958 {
112e8700 7959 uiout->text (" received signal ");
8b93c638 7960 annotate_signal_name ();
112e8700
SM
7961 if (uiout->is_mi_like_p ())
7962 uiout->field_string
7963 ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
7964 uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
8b93c638 7965 annotate_signal_name_end ();
112e8700 7966 uiout->text (", ");
8b93c638 7967 annotate_signal_string ();
112e8700 7968 uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));
012b3a21 7969
272bb05c
JB
7970 struct regcache *regcache = get_current_regcache ();
7971 struct gdbarch *gdbarch = regcache->arch ();
7972 if (gdbarch_report_signal_info_p (gdbarch))
7973 gdbarch_report_signal_info (gdbarch, uiout, siggnal);
7974
8b93c638 7975 annotate_signal_string_end ();
33d62d64 7976 }
112e8700 7977 uiout->text (".\n");
33d62d64 7978}
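/* For illustration only (thread id, thread name and signal are example
   values): the code above is what produces output such as

     Thread 2 "worker" received signal SIGSEGV, Segmentation fault.

   on the CLI (the "Thread N" prefix appears when GDB has seen more than
   one thread), while MI consumers get reason="signal-received",
   signal-name="SIGSEGV", signal-meaning="Segmentation fault" instead.  */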
252fbfc8 7979
fd664c91
PA
7980void
7981print_no_history_reason (struct ui_out *uiout)
33d62d64 7982{
112e8700 7983 uiout->text ("\nNo more reverse-execution history.\n");
11cf8741 7984}
43ff13b4 7985
0c7e1a46
PA
7986/* Print current location without a level number, if we have changed
7987 functions or hit a breakpoint. Print source line if we have one.
7988 bpstat_print contains the logic deciding in detail what to print,
7989 based on the event(s) that just occurred. */
7990
243a9253
PA
7991static void
7992print_stop_location (struct target_waitstatus *ws)
0c7e1a46
PA
7993{
7994 int bpstat_ret;
f486487f 7995 enum print_what source_flag;
0c7e1a46
PA
7996 int do_frame_printing = 1;
7997 struct thread_info *tp = inferior_thread ();
7998
7999 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
8000 switch (bpstat_ret)
8001 {
8002 case PRINT_UNKNOWN:
8003 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
8004 should) carry around the function and does (or should) use
8005 that when doing a frame comparison. */
8006 if (tp->control.stop_step
8007 && frame_id_eq (tp->control.step_frame_id,
8008 get_frame_id (get_current_frame ()))
f2ffa92b
PA
8009 && (tp->control.step_start_function
8010 == find_pc_function (tp->suspend.stop_pc)))
0c7e1a46
PA
8011 {
8012 /* Finished step, just print source line. */
8013 source_flag = SRC_LINE;
8014 }
8015 else
8016 {
8017 /* Print location and source line. */
8018 source_flag = SRC_AND_LOC;
8019 }
8020 break;
8021 case PRINT_SRC_AND_LOC:
8022 /* Print location and source line. */
8023 source_flag = SRC_AND_LOC;
8024 break;
8025 case PRINT_SRC_ONLY:
8026 source_flag = SRC_LINE;
8027 break;
8028 case PRINT_NOTHING:
8029 /* Something bogus. */
8030 source_flag = SRC_LINE;
8031 do_frame_printing = 0;
8032 break;
8033 default:
8034 internal_error (__FILE__, __LINE__, _("Unknown value."));
8035 }
8036
8037 /* The behavior of this routine with respect to the source
8038 flag is:
8039 SRC_LINE: Print only source line
8040 LOCATION: Print only location
8041 SRC_AND_LOC: Print location and source line. */
8042 if (do_frame_printing)
8043 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
243a9253
PA
8044}
8045
243a9253
PA
8046/* See infrun.h. */
8047
8048void
4c7d57e7 8049print_stop_event (struct ui_out *uiout, bool displays)
243a9253 8050{
243a9253 8051 struct target_waitstatus last;
243a9253
PA
8052 struct thread_info *tp;
8053
5b6d1e4f 8054 get_last_target_status (nullptr, nullptr, &last);
243a9253 8055
67ad9399
TT
8056 {
8057 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
0c7e1a46 8058
67ad9399 8059 print_stop_location (&last);
243a9253 8060
67ad9399 8061 /* Display the auto-display expressions. */
4c7d57e7
TT
8062 if (displays)
8063 do_displays ();
67ad9399 8064 }
243a9253
PA
8065
8066 tp = inferior_thread ();
8067 if (tp->thread_fsm != NULL
46e3ed7f 8068 && tp->thread_fsm->finished_p ())
243a9253
PA
8069 {
8070 struct return_value_info *rv;
8071
46e3ed7f 8072 rv = tp->thread_fsm->return_value ();
243a9253
PA
8073 if (rv != NULL)
8074 print_return_value (uiout, rv);
8075 }
0c7e1a46
PA
8076}
8077
388a7084
PA
8078/* See infrun.h. */
8079
8080void
8081maybe_remove_breakpoints (void)
8082{
55f6301a 8083 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
388a7084
PA
8084 {
8085 if (remove_breakpoints ())
8086 {
223ffa71 8087 target_terminal::ours_for_output ();
388a7084
PA
8088 printf_filtered (_("Cannot remove breakpoints because "
8089 "program is no longer writable.\nFurther "
8090 "execution is probably impossible.\n"));
8091 }
8092 }
8093}
8094
4c2f2a79
PA
8095/* The execution context that just caused a normal stop. */
8096
8097struct stop_context
8098{
2d844eaf 8099 stop_context ();
2d844eaf
TT
8100
8101 DISABLE_COPY_AND_ASSIGN (stop_context);
8102
8103 bool changed () const;
8104
4c2f2a79
PA
8105 /* The stop ID. */
8106 ULONGEST stop_id;
c906108c 8107
4c2f2a79 8108 /* The event PTID. */
c906108c 8109
4c2f2a79
PA
8110 ptid_t ptid;
8111
8112 /* If stopped for a thread event, this is the thread that caused the
8113 stop. */
d634cd0b 8114 thread_info_ref thread;
4c2f2a79
PA
8115
8116 /* The inferior that caused the stop. */
8117 int inf_num;
8118};
8119
2d844eaf 8120/* Initializes a new stop context. If stopped for a thread event, this
4c2f2a79
PA
8121 takes a strong reference to the thread. */
8122
2d844eaf 8123stop_context::stop_context ()
4c2f2a79 8124{
2d844eaf
TT
8125 stop_id = get_stop_id ();
8126 ptid = inferior_ptid;
8127 inf_num = current_inferior ()->num;
4c2f2a79 8128
d7e15655 8129 if (inferior_ptid != null_ptid)
4c2f2a79
PA
8130 {
8131 /* Take a strong reference so that the thread can't be deleted
8132 yet. */
d634cd0b 8133 thread = thread_info_ref::new_reference (inferior_thread ());
4c2f2a79 8134 }
4c2f2a79
PA
8135}
8136
8137/* Return true if the current context no longer matches the saved stop
8138 context. */
8139
2d844eaf
TT
8140bool
8141stop_context::changed () const
8142{
8143 if (ptid != inferior_ptid)
8144 return true;
8145 if (inf_num != current_inferior ()->num)
8146 return true;
8147 if (thread != NULL && thread->state != THREAD_STOPPED)
8148 return true;
8149 if (get_stop_id () != stop_id)
8150 return true;
8151 return false;
4c2f2a79
PA
8152}
8153
8154/* See infrun.h. */
8155
8156int
96baa820 8157normal_stop (void)
c906108c 8158{
73b65bb0 8159 struct target_waitstatus last;
73b65bb0 8160
5b6d1e4f 8161 get_last_target_status (nullptr, nullptr, &last);
73b65bb0 8162
4c2f2a79
PA
8163 new_stop_id ();
8164
29f49a6a
PA
8165 /* If an exception is thrown from this point on, make sure to
8166 propagate GDB's knowledge of the executing state to the
8167 frontend/user running state. A QUIT is an easy exception to see
8168 here, so do this before any filtered output. */
731f534f 8169
5b6d1e4f 8170 ptid_t finish_ptid = null_ptid;
731f534f 8171
c35b1492 8172 if (!non_stop)
5b6d1e4f 8173 finish_ptid = minus_one_ptid;
e1316e60
PA
8174 else if (last.kind == TARGET_WAITKIND_SIGNALLED
8175 || last.kind == TARGET_WAITKIND_EXITED)
8176 {
8177 /* On some targets, we may still have live threads in the
8178 inferior when we get a process exit event. E.g., for
8179 "checkpoint", when the current checkpoint/fork exits,
8180 linux-fork.c automatically switches to another fork from
8181 within target_mourn_inferior. */
731f534f 8182 if (inferior_ptid != null_ptid)
5b6d1e4f 8183 finish_ptid = ptid_t (inferior_ptid.pid ());
e1316e60
PA
8184 }
8185 else if (last.kind != TARGET_WAITKIND_NO_RESUMED)
5b6d1e4f
PA
8186 finish_ptid = inferior_ptid;
8187
8188 gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
8189 if (finish_ptid != null_ptid)
8190 {
8191 maybe_finish_thread_state.emplace
8192 (user_visible_resume_target (finish_ptid), finish_ptid);
8193 }
29f49a6a 8194
b57bacec
PA
8195 /* As we're presenting a stop, and potentially removing breakpoints,
8196 update the thread list so we can tell whether there are threads
8197 running on the target. With target remote, for example, we can
8198 only learn about new threads when we explicitly update the thread
8199 list. Do this before notifying the interpreters about signal
8200 stops, end of stepping ranges, etc., so that the "new thread"
8201 output is emitted before e.g., "Program received signal FOO",
8202 instead of after. */
8203 update_thread_list ();
8204
8205 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
76727919 8206 gdb::observers::signal_received.notify (inferior_thread ()->suspend.stop_signal);
b57bacec 8207
c906108c
SS
8208 /* As with the notification of thread events, we want to delay
8209 notifying the user that we've switched thread context until
8210 the inferior actually stops.
8211
73b65bb0
DJ
8212 There's no point in saying anything if the inferior has exited.
8213 Note that SIGNALLED here means "exited with a signal", not
b65dc60b
PA
8214 "received a signal".
8215
8216 Also skip saying anything in non-stop mode. In that mode, as we
8217 don't want GDB to switch threads behind the user's back, to avoid
8218 races where the user is typing a command to apply to thread x,
8219 but GDB switches to thread y before the user finishes entering
8220 the command, fetch_inferior_event installs a cleanup to restore
8221 the current thread back to the thread the user had selected right
8222 after this event is handled, so we're not really switching, only
8223 informing of a stop. */
4f8d22e3 8224 if (!non_stop
731f534f 8225 && previous_inferior_ptid != inferior_ptid
55f6301a 8226 && target_has_execution ()
73b65bb0 8227 && last.kind != TARGET_WAITKIND_SIGNALLED
0e5bf2a8
PA
8228 && last.kind != TARGET_WAITKIND_EXITED
8229 && last.kind != TARGET_WAITKIND_NO_RESUMED)
c906108c 8230 {
0e454242 8231 SWITCH_THRU_ALL_UIS ()
3b12939d 8232 {
223ffa71 8233 target_terminal::ours_for_output ();
3b12939d 8234 printf_filtered (_("[Switching to %s]\n"),
a068643d 8235 target_pid_to_str (inferior_ptid).c_str ());
3b12939d
PA
8236 annotate_thread_changed ();
8237 }
39f77062 8238 previous_inferior_ptid = inferior_ptid;
c906108c 8239 }
c906108c 8240
0e5bf2a8
PA
8241 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
8242 {
0e454242 8243 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8244 if (current_ui->prompt_state == PROMPT_BLOCKED)
8245 {
223ffa71 8246 target_terminal::ours_for_output ();
3b12939d
PA
8247 printf_filtered (_("No unwaited-for children left.\n"));
8248 }
0e5bf2a8
PA
8249 }
8250
b57bacec 8251 /* Note: this depends on the update_thread_list call above. */
388a7084 8252 maybe_remove_breakpoints ();
c906108c 8253
c906108c
SS
8254 /* If an auto-display called a function and that got a signal,
8255 delete that auto-display to avoid an infinite recursion. */
8256
8257 if (stopped_by_random_signal)
8258 disable_current_display ();
8259
0e454242 8260 SWITCH_THRU_ALL_UIS ()
3b12939d
PA
8261 {
8262 async_enable_stdin ();
8263 }
c906108c 8264
388a7084 8265 /* Let the user/frontend see the threads as stopped. */
731f534f 8266 maybe_finish_thread_state.reset ();
388a7084
PA
8267
8268 /* Select innermost stack frame - i.e., current frame is frame 0,
8269 and current location is based on that. Handle the case where the
8270 dummy call is returning after being stopped. E.g. the dummy call
8271 previously hit a breakpoint. (If the dummy call returns
8272 normally, we won't reach here.) Do this before the stop hook is
8273 run, so that it doesn't get to see the temporary dummy frame,
8274 which is not where we'll present the stop. */
8275 if (has_stack_frames ())
8276 {
8277 if (stop_stack_dummy == STOP_STACK_DUMMY)
8278 {
8279 /* Pop the empty frame that contains the stack dummy. This
8280 also restores inferior state prior to the call (struct
8281 infcall_suspend_state). */
8282 struct frame_info *frame = get_current_frame ();
8283
8284 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
8285 frame_pop (frame);
8286 /* frame_pop calls reinit_frame_cache as the last thing it
8287 does which means there's now no selected frame. */
8288 }
8289
8290 select_frame (get_current_frame ());
8291
8292 /* Set the current source location. */
8293 set_current_sal_from_frame (get_current_frame ());
8294 }
dd7e2d2b
PA
8295
8296 /* Look up the hook_stop and run it (CLI internally handles problem
8297 of stop_command's pre-hook not existing). */
4c2f2a79
PA
8298 if (stop_command != NULL)
8299 {
2d844eaf 8300 stop_context saved_context;
4c2f2a79 8301
a70b8144 8302 try
bf469271
PA
8303 {
8304 execute_cmd_pre_hook (stop_command);
8305 }
230d2906 8306 catch (const gdb_exception &ex)
bf469271
PA
8307 {
8308 exception_fprintf (gdb_stderr, ex,
8309 "Error while running hook_stop:\n");
8310 }
4c2f2a79
PA
8311
8312 /* If the stop hook resumes the target, then there's no point in
8313 trying to notify about the previous stop; its context is
8314 gone. Likewise if the command switches thread or inferior --
8315 the observers would print a stop for the wrong
8316 thread/inferior. */
2d844eaf
TT
8317 if (saved_context.changed ())
8318 return 1;
4c2f2a79 8319 }
dd7e2d2b 8320
388a7084
PA
8321 /* Notify observers about the stop. This is where the interpreters
8322 print the stop event. */
d7e15655 8323 if (inferior_ptid != null_ptid)
76727919 8324 gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
388a7084
PA
8325 stop_print_frame);
8326 else
76727919 8327 gdb::observers::normal_stop.notify (NULL, stop_print_frame);
347bddb7 8328
243a9253
PA
8329 annotate_stopped ();
8330
55f6301a 8331 if (target_has_execution ())
48844aa6
PA
8332 {
8333 if (last.kind != TARGET_WAITKIND_SIGNALLED
fe726667
PA
8334 && last.kind != TARGET_WAITKIND_EXITED
8335 && last.kind != TARGET_WAITKIND_NO_RESUMED)
48844aa6
PA
8336 /* Delete the breakpoint we stopped at, if it wants to be deleted.
8337 Delete any breakpoint that is to be deleted at the next stop. */
16c381f0 8338 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
94cc34af 8339 }
6c95b8df
PA
8340
8341 /* Try to get rid of automatically added inferiors that are no
8342 longer needed. Keeping those around slows down things linearly.
8343 Note that this never removes the current inferior. */
8344 prune_inferiors ();
4c2f2a79
PA
8345
8346 return 0;
c906108c 8347}
c906108c 8348\f
c5aa993b 8349int
96baa820 8350signal_stop_state (int signo)
c906108c 8351{
d6b48e9c 8352 return signal_stop[signo];
c906108c
SS
8353}
8354
c5aa993b 8355int
96baa820 8356signal_print_state (int signo)
c906108c
SS
8357{
8358 return signal_print[signo];
8359}
8360
c5aa993b 8361int
96baa820 8362signal_pass_state (int signo)
c906108c
SS
8363{
8364 return signal_program[signo];
8365}
8366
2455069d
UW
8367static void
8368signal_cache_update (int signo)
8369{
8370 if (signo == -1)
8371 {
a493e3e2 8372 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
2455069d
UW
8373 signal_cache_update (signo);
8374
8375 return;
8376 }
8377
8378 signal_pass[signo] = (signal_stop[signo] == 0
8379 && signal_print[signo] == 0
ab04a2af
TT
8380 && signal_program[signo] == 1
8381 && signal_catch[signo] == 0);
2455069d
UW
8382}
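/* A worked example of the rule above (SIGPIPE chosen for illustration):
   after "handle SIGPIPE nostop noprint pass", signal_stop and
   signal_print are 0 and signal_program is 1 for that signal; assuming
   no "catch signal" catchpoint is set, signal_catch is 0 as well, so
   signal_pass[GDB_SIGNAL_PIPE] becomes 1 and the target may deliver the
   signal straight to the inferior without reporting a stop to GDB.  */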
8383
488f131b 8384int
7bda5e4a 8385signal_stop_update (int signo, int state)
d4f3574e
SS
8386{
8387 int ret = signal_stop[signo];
abbb1732 8388
d4f3574e 8389 signal_stop[signo] = state;
2455069d 8390 signal_cache_update (signo);
d4f3574e
SS
8391 return ret;
8392}
8393
488f131b 8394int
7bda5e4a 8395signal_print_update (int signo, int state)
d4f3574e
SS
8396{
8397 int ret = signal_print[signo];
abbb1732 8398
d4f3574e 8399 signal_print[signo] = state;
2455069d 8400 signal_cache_update (signo);
d4f3574e
SS
8401 return ret;
8402}
8403
488f131b 8404int
7bda5e4a 8405signal_pass_update (int signo, int state)
d4f3574e
SS
8406{
8407 int ret = signal_program[signo];
abbb1732 8408
d4f3574e 8409 signal_program[signo] = state;
2455069d 8410 signal_cache_update (signo);
d4f3574e
SS
8411 return ret;
8412}
8413
ab04a2af
TT
8414/* Update the global 'signal_catch' from INFO and notify the
8415 target. */
8416
8417void
8418signal_catch_update (const unsigned int *info)
8419{
8420 int i;
8421
8422 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
8423 signal_catch[i] = info[i] > 0;
8424 signal_cache_update (-1);
adc6a863 8425 target_pass_signals (signal_pass);
ab04a2af
TT
8426}
8427
c906108c 8428static void
96baa820 8429sig_print_header (void)
c906108c 8430{
3e43a32a
MS
8431 printf_filtered (_("Signal Stop\tPrint\tPass "
8432 "to program\tDescription\n"));
c906108c
SS
8433}
8434
8435static void
2ea28649 8436sig_print_info (enum gdb_signal oursig)
c906108c 8437{
2ea28649 8438 const char *name = gdb_signal_to_name (oursig);
c906108c 8439 int name_padding = 13 - strlen (name);
96baa820 8440
c906108c
SS
8441 if (name_padding <= 0)
8442 name_padding = 0;
8443
8444 printf_filtered ("%s", name);
488f131b 8445 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
c906108c
SS
8446 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
8447 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
8448 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
2ea28649 8449 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
c906108c
SS
8450}
8451
8452/* Specify how various signals in the inferior should be handled. */
8453
8454static void
0b39b52e 8455handle_command (const char *args, int from_tty)
c906108c 8456{
c906108c 8457 int digits, wordlen;
b926417a 8458 int sigfirst, siglast;
2ea28649 8459 enum gdb_signal oursig;
c906108c 8460 int allsigs;
c906108c
SS
8461
8462 if (args == NULL)
8463 {
e2e0b3e5 8464 error_no_arg (_("signal to handle"));
c906108c
SS
8465 }
8466
1777feb0 8467 /* Allocate and zero an array of flags for which signals to handle. */
c906108c 8468
adc6a863
PA
8469 const size_t nsigs = GDB_SIGNAL_LAST;
8470 unsigned char sigs[nsigs] {};
c906108c 8471
1777feb0 8472 /* Break the command line up into args. */
c906108c 8473
773a1edc 8474 gdb_argv built_argv (args);
c906108c
SS
8475
8476 /* Walk through the args, looking for signal numbers, signal names, and
8477 actions. Signal numbers and signal names may be interspersed with
8478 actions, with the actions being performed for all signals cumulatively
1777feb0 8479 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
c906108c 8480
773a1edc 8481 for (char *arg : built_argv)
c906108c 8482 {
773a1edc
TT
8483 wordlen = strlen (arg);
8484 for (digits = 0; isdigit (arg[digits]); digits++)
c906108c
SS
8485 {;
8486 }
8487 allsigs = 0;
8488 sigfirst = siglast = -1;
8489
773a1edc 8490 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
c906108c
SS
8491 {
8492 /* Apply action to all signals except those used by the
1777feb0 8493 debugger. Silently skip those. */
c906108c
SS
8494 allsigs = 1;
8495 sigfirst = 0;
8496 siglast = nsigs - 1;
8497 }
773a1edc 8498 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
c906108c
SS
8499 {
8500 SET_SIGS (nsigs, sigs, signal_stop);
8501 SET_SIGS (nsigs, sigs, signal_print);
8502 }
773a1edc 8503 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
c906108c
SS
8504 {
8505 UNSET_SIGS (nsigs, sigs, signal_program);
8506 }
773a1edc 8507 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
c906108c
SS
8508 {
8509 SET_SIGS (nsigs, sigs, signal_print);
8510 }
773a1edc 8511 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
c906108c
SS
8512 {
8513 SET_SIGS (nsigs, sigs, signal_program);
8514 }
773a1edc 8515 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
c906108c
SS
8516 {
8517 UNSET_SIGS (nsigs, sigs, signal_stop);
8518 }
773a1edc 8519 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
c906108c
SS
8520 {
8521 SET_SIGS (nsigs, sigs, signal_program);
8522 }
773a1edc 8523 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
c906108c
SS
8524 {
8525 UNSET_SIGS (nsigs, sigs, signal_print);
8526 UNSET_SIGS (nsigs, sigs, signal_stop);
8527 }
773a1edc 8528 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
c906108c
SS
8529 {
8530 UNSET_SIGS (nsigs, sigs, signal_program);
8531 }
8532 else if (digits > 0)
8533 {
8534 /* It is numeric. The numeric signal refers to our own
8535 internal signal numbering from target.h, not to host/target
8536 signal number. This is a feature; users really should be
8537 using symbolic names anyway, and the common ones like
8538 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
8539
8540 sigfirst = siglast = (int)
773a1edc
TT
8541 gdb_signal_from_command (atoi (arg));
8542 if (arg[digits] == '-')
c906108c
SS
8543 {
8544 siglast = (int)
773a1edc 8545 gdb_signal_from_command (atoi (arg + digits + 1));
c906108c
SS
8546 }
8547 if (sigfirst > siglast)
8548 {
1777feb0 8549 /* Bet he didn't figure we'd think of this case... */
b926417a 8550 std::swap (sigfirst, siglast);
c906108c
SS
8551 }
8552 }
8553 else
8554 {
773a1edc 8555 oursig = gdb_signal_from_name (arg);
a493e3e2 8556 if (oursig != GDB_SIGNAL_UNKNOWN)
c906108c
SS
8557 {
8558 sigfirst = siglast = (int) oursig;
8559 }
8560 else
8561 {
8562 /* Not a number and not a recognized flag word => complain. */
773a1edc 8563 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
c906108c
SS
8564 }
8565 }
8566
8567 /* If any signal numbers or symbol names were found, set flags for
dda83cd7 8568 which signals to apply actions to. */
c906108c 8569
b926417a 8570 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
c906108c 8571 {
2ea28649 8572 switch ((enum gdb_signal) signum)
c906108c 8573 {
a493e3e2
PA
8574 case GDB_SIGNAL_TRAP:
8575 case GDB_SIGNAL_INT:
c906108c
SS
8576 if (!allsigs && !sigs[signum])
8577 {
9e2f0ad4 8578 if (query (_("%s is used by the debugger.\n\
3e43a32a 8579Are you sure you want to change it? "),
2ea28649 8580 gdb_signal_to_name ((enum gdb_signal) signum)))
c906108c
SS
8581 {
8582 sigs[signum] = 1;
8583 }
8584 else
c119e040 8585 printf_unfiltered (_("Not confirmed, unchanged.\n"));
c906108c
SS
8586 }
8587 break;
a493e3e2
PA
8588 case GDB_SIGNAL_0:
8589 case GDB_SIGNAL_DEFAULT:
8590 case GDB_SIGNAL_UNKNOWN:
c906108c
SS
8591 /* Make sure that "all" doesn't print these. */
8592 break;
8593 default:
8594 sigs[signum] = 1;
8595 break;
8596 }
8597 }
c906108c
SS
8598 }
8599
b926417a 8600 for (int signum = 0; signum < nsigs; signum++)
3a031f65
PA
8601 if (sigs[signum])
8602 {
2455069d 8603 signal_cache_update (-1);
adc6a863
PA
8604 target_pass_signals (signal_pass);
8605 target_program_signals (signal_program);
c906108c 8606
3a031f65
PA
8607 if (from_tty)
8608 {
8609 /* Show the results. */
8610 sig_print_header ();
8611 for (; signum < nsigs; signum++)
8612 if (sigs[signum])
aead7601 8613 sig_print_info ((enum gdb_signal) signum);
3a031f65
PA
8614 }
8615
8616 break;
8617 }
c906108c
SS
8618}
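/* Usage sketch for the parser above (example arguments only):

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle 5-10 print
     (gdb) handle all pass

   Signals and actions may be interleaved and accumulate.  Numeric
   signals use GDB's own numbering and only 1-15 are accepted (see
   gdb_signal_from_command below); "all" silently skips SIGTRAP and
   SIGINT, which GDB itself uses, while naming either of those
   explicitly prompts for confirmation.  */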
8619
de0bea00
MF
8620/* Complete the "handle" command. */
8621
eb3ff9a5 8622static void
de0bea00 8623handle_completer (struct cmd_list_element *ignore,
eb3ff9a5 8624 completion_tracker &tracker,
6f937416 8625 const char *text, const char *word)
de0bea00 8626{
de0bea00
MF
8627 static const char * const keywords[] =
8628 {
8629 "all",
8630 "stop",
8631 "ignore",
8632 "print",
8633 "pass",
8634 "nostop",
8635 "noignore",
8636 "noprint",
8637 "nopass",
8638 NULL,
8639 };
8640
eb3ff9a5
PA
8641 signal_completer (ignore, tracker, text, word);
8642 complete_on_enum (tracker, keywords, word, word);
de0bea00
MF
8643}
8644
2ea28649
PA
8645enum gdb_signal
8646gdb_signal_from_command (int num)
ed01b82c
PA
8647{
8648 if (num >= 1 && num <= 15)
2ea28649 8649 return (enum gdb_signal) num;
ed01b82c
PA
8650 error (_("Only signals 1-15 are valid as numeric signals.\n\
8651Use \"info signals\" for a list of symbolic signals."));
8652}
8653
c906108c
SS
8654/* Print current contents of the tables set by the handle command.
8655 It is possible we should just be printing signals actually used
8656 by the current target (but for things to work right when switching
8657 targets, all signals should be in the signal tables). */
8658
8659static void
1d12d88f 8660info_signals_command (const char *signum_exp, int from_tty)
c906108c 8661{
2ea28649 8662 enum gdb_signal oursig;
abbb1732 8663
c906108c
SS
8664 sig_print_header ();
8665
8666 if (signum_exp)
8667 {
8668 /* First see if this is a symbol name. */
2ea28649 8669 oursig = gdb_signal_from_name (signum_exp);
a493e3e2 8670 if (oursig == GDB_SIGNAL_UNKNOWN)
c906108c
SS
8671 {
8672 /* No, try numeric. */
8673 oursig =
2ea28649 8674 gdb_signal_from_command (parse_and_eval_long (signum_exp));
c906108c
SS
8675 }
8676 sig_print_info (oursig);
8677 return;
8678 }
8679
8680 printf_filtered ("\n");
8681 /* These ugly casts brought to you by the native VAX compiler. */
a493e3e2
PA
8682 for (oursig = GDB_SIGNAL_FIRST;
8683 (int) oursig < (int) GDB_SIGNAL_LAST;
2ea28649 8684 oursig = (enum gdb_signal) ((int) oursig + 1))
c906108c
SS
8685 {
8686 QUIT;
8687
a493e3e2
PA
8688 if (oursig != GDB_SIGNAL_UNKNOWN
8689 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
c906108c
SS
8690 sig_print_info (oursig);
8691 }
8692
3e43a32a
MS
8693 printf_filtered (_("\nUse the \"handle\" command "
8694 "to change these tables.\n"));
c906108c 8695}
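/* For illustration only (default settings assumed; column spacing
   approximated): the table printed above looks like

     Signal        Stop   Print   Pass to program   Description
     SIGINT        Yes    Yes     No                Interrupt
     SIGALRM       No     No      Yes               Alarm clock

   with one row per signal produced by sig_print_info.  */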
4aa995e1
PA
8696
8697/* The $_siginfo convenience variable is a bit special. We don't know
8698 for sure the type of the value until we actually have a chance to
7a9dd1b2 8699 fetch the data. The type can change depending on gdbarch, so it is
4aa995e1
PA
8700 also dependent on which thread you have selected. Hence, this is implemented by combining:
8701
8702 1. making $_siginfo be an internalvar that creates a new value on
8703 access.
8704
8705 2. making the value of $_siginfo be an lval_computed value. */
8706
8707/* This function implements the lval_computed support for reading a
8708 $_siginfo value. */
8709
8710static void
8711siginfo_value_read (struct value *v)
8712{
8713 LONGEST transferred;
8714
a911d87a
PA
8715 /* If we can access registers, so can we access $_siginfo. Likewise
8716 vice versa. */
8717 validate_registers_access ();
c709acd1 8718
4aa995e1 8719 transferred =
8b88a78e 8720 target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO,
4aa995e1
PA
8721 NULL,
8722 value_contents_all_raw (v),
8723 value_offset (v),
8724 TYPE_LENGTH (value_type (v)));
8725
8726 if (transferred != TYPE_LENGTH (value_type (v)))
8727 error (_("Unable to read siginfo"));
8728}
8729
8730/* This function implements the lval_computed support for writing a
8731 $_siginfo value. */
8732
8733static void
8734siginfo_value_write (struct value *v, struct value *fromval)
8735{
8736 LONGEST transferred;
8737
a911d87a
PA
8738 /* If we can access registers, so can we access $_siginfo. Likewise
8739 vice versa. */
8740 validate_registers_access ();
c709acd1 8741
8b88a78e 8742 transferred = target_write (current_top_target (),
4aa995e1
PA
8743 TARGET_OBJECT_SIGNAL_INFO,
8744 NULL,
8745 value_contents_all_raw (fromval),
8746 value_offset (v),
8747 TYPE_LENGTH (value_type (fromval)));
8748
8749 if (transferred != TYPE_LENGTH (value_type (fromval)))
8750 error (_("Unable to write siginfo"));
8751}
8752
c8f2448a 8753static const struct lval_funcs siginfo_value_funcs =
4aa995e1
PA
8754 {
8755 siginfo_value_read,
8756 siginfo_value_write
8757 };
8758
8759/* Return a new value with the correct type for the siginfo object of
78267919
UW
8760 the current thread using architecture GDBARCH. Return a void value
8761 if there's no object available. */
4aa995e1 8762
2c0b251b 8763static struct value *
22d2b532
SDJ
8764siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
8765 void *ignore)
4aa995e1 8766{
841de120 8767 if (target_has_stack ()
d7e15655 8768 && inferior_ptid != null_ptid
78267919 8769 && gdbarch_get_siginfo_type_p (gdbarch))
4aa995e1 8770 {
78267919 8771 struct type *type = gdbarch_get_siginfo_type (gdbarch);
abbb1732 8772
78267919 8773 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
4aa995e1
PA
8774 }
8775
78267919 8776 return allocate_value (builtin_type (gdbarch)->builtin_void);
4aa995e1
PA
8777}
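/* Usage sketch (example session; the field names follow the Linux
   siginfo_t layout that gdbarch_get_siginfo_type typically exposes):

     (gdb) print $_siginfo.si_signo
     $1 = 11
     (gdb) print $_siginfo._sifields._sigfault.si_addr
     $2 = (void *) 0x0

   Because the value is lval_computed, the data is read from
   TARGET_OBJECT_SIGNAL_INFO only when $_siginfo is actually evaluated,
   and assigning to it (or to one of its fields) writes it back through
   siginfo_value_write.  */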
8778
c906108c 8779\f
16c381f0
JK
8780/* infcall_suspend_state contains state about the program itself like its
8781 registers and any signal it received when it last stopped.
8782 This state must be restored regardless of how the inferior function call
8783 ends (either successfully, or after it hits a breakpoint or signal)
8784 if the program is to properly continue where it left off. */
8785
6bf78e29 8786class infcall_suspend_state
7a292a7a 8787{
6bf78e29
AB
8788public:
8789 /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
8790 once the inferior function call has finished. */
8791 infcall_suspend_state (struct gdbarch *gdbarch,
dda83cd7
SM
8792 const struct thread_info *tp,
8793 struct regcache *regcache)
6bf78e29
AB
8794 : m_thread_suspend (tp->suspend),
8795 m_registers (new readonly_detached_regcache (*regcache))
8796 {
8797 gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;
8798
8799 if (gdbarch_get_siginfo_type_p (gdbarch))
8800 {
dda83cd7
SM
8801 struct type *type = gdbarch_get_siginfo_type (gdbarch);
8802 size_t len = TYPE_LENGTH (type);
6bf78e29 8803
dda83cd7 8804 siginfo_data.reset ((gdb_byte *) xmalloc (len));
6bf78e29 8805
dda83cd7
SM
8806 if (target_read (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
8807 siginfo_data.get (), 0, len) != len)
8808 {
8809 /* Errors ignored. */
8810 siginfo_data.reset (nullptr);
8811 }
6bf78e29
AB
8812 }
8813
8814 if (siginfo_data)
8815 {
dda83cd7
SM
8816 m_siginfo_gdbarch = gdbarch;
8817 m_siginfo_data = std::move (siginfo_data);
6bf78e29
AB
8818 }
8819 }
8820
8821 /* Return a pointer to the stored register state. */
16c381f0 8822
6bf78e29
AB
8823 readonly_detached_regcache *registers () const
8824 {
8825 return m_registers.get ();
8826 }
8827
8828 /* Restores the stored state into GDBARCH, TP, and REGCACHE. */
8829
8830 void restore (struct gdbarch *gdbarch,
dda83cd7
SM
8831 struct thread_info *tp,
8832 struct regcache *regcache) const
6bf78e29
AB
8833 {
8834 tp->suspend = m_thread_suspend;
8835
8836 if (m_siginfo_gdbarch == gdbarch)
8837 {
dda83cd7 8838 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6bf78e29 8839
dda83cd7
SM
8840 /* Errors ignored. */
8841 target_write (current_top_target (), TARGET_OBJECT_SIGNAL_INFO, NULL,
8842 m_siginfo_data.get (), 0, TYPE_LENGTH (type));
6bf78e29
AB
8843 }
8844
8845 /* The inferior can be gone if the user types "print exit(0)"
8846 (and perhaps other times). */
55f6301a 8847 if (target_has_execution ())
6bf78e29
AB
8848 /* NB: The register write goes through to the target. */
8849 regcache->restore (registers ());
8850 }
8851
8852private:
8853 /* How the current thread stopped before the inferior function call was
8854 executed. */
8855 struct thread_suspend_state m_thread_suspend;
8856
8857 /* The registers before the inferior function call was executed. */
8858 std::unique_ptr<readonly_detached_regcache> m_registers;
1736ad11 8859
35515841 8860 /* Architecture defining the format of SIGINFO_DATA, or NULL if no siginfo was saved. */
6bf78e29 8861 struct gdbarch *m_siginfo_gdbarch = nullptr;
1736ad11
JK
8862
8863 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
8864 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For different gdbarch the
8865 content would be invalid. */
6bf78e29 8866 gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
b89667eb
DE
8867};
8868
cb524840
TT
8869infcall_suspend_state_up
8870save_infcall_suspend_state ()
b89667eb 8871{
b89667eb 8872 struct thread_info *tp = inferior_thread ();
1736ad11 8873 struct regcache *regcache = get_current_regcache ();
ac7936df 8874 struct gdbarch *gdbarch = regcache->arch ();
1736ad11 8875
6bf78e29
AB
8876 infcall_suspend_state_up inf_state
8877 (new struct infcall_suspend_state (gdbarch, tp, regcache));
1736ad11 8878
6bf78e29
AB
8879 /* Having saved the current state, adjust the thread state, discarding
8880 any stop signal information. The stop signal is not useful when
8881 starting an inferior function call, and run_inferior_call will not use
8882 the signal due to its `proceed' call with GDB_SIGNAL_0. */
a493e3e2 8883 tp->suspend.stop_signal = GDB_SIGNAL_0;
35515841 8884
b89667eb
DE
8885 return inf_state;
8886}
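/* A minimal usage sketch (illustrative, not part of infrun.c; the real
   caller, call_function_by_hand in infcall.c, also saves an
   infcall_control_state and must cope with errors and dummy-frame
   unwinding):  */

static void
example_infcall_suspend_roundtrip (void)
{
  /* Capture the registers, the stop signal, and any cached siginfo.  */
  infcall_suspend_state_up saved = save_infcall_suspend_state ();

  /* ... set up the dummy frame and run the inferior call here ...  */

  /* Hand ownership back; this restores the saved state into the current
     thread and regcache and then frees it.  */
  restore_infcall_suspend_state (saved.release ());
}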

/* Restore inferior session state to INF_STATE.  */

void
restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  struct thread_info *tp = inferior_thread ();
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();

  inf_state->restore (gdbarch, tp, regcache);
  discard_infcall_suspend_state (inf_state);
}

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}

readonly_detached_regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers ();
}
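
/* Editor's illustrative sketch, not part of the original infrun.c: the
   usual pairing of the suspend-state helpers above around an inferior
   function call.  The function name and the DO_CALL callback are
   hypothetical placeholders.  */

static void
example_call_with_suspend_state (void (*do_call) (void))
{
  /* Snapshot the current thread's registers, siginfo and stop-signal
     state.  */
  infcall_suspend_state_up saved = save_infcall_suspend_state ();

  do_call ();

  /* Put the thread back the way it was.  restore_infcall_suspend_state
     also frees the state, so hand over ownership from the unique
     pointer.  */
  restore_infcall_suspend_state (saved.release ());
}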

/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  struct thread_control_state thread_control;
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  int stopped_by_random_signal = 0;

  /* ID and level of the selected frame when the inferior function
     call was made.  */
  struct frame_id selected_frame_id {};
  int selected_frame_level = -1;
};

/* Save all of the information associated with the inferior<==>gdb
   connection.  */

infcall_control_state_up
save_infcall_control_state ()
{
  infcall_control_state_up inf_status (new struct infcall_control_state);
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  inf_status->thread_control = tp->control;
  inf_status->inferior_control = inf->control;

  tp->control.step_resume_breakpoint = NULL;
  tp->control.exception_resume_breakpoint = NULL;

  /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
     chain.  If caller's caller is walking the chain, they'll be happier if we
     hand them back the original chain when restore_infcall_control_state is
     called.  */
  tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);

  /* Other fields:  */
  inf_status->stop_stack_dummy = stop_stack_dummy;
  inf_status->stopped_by_random_signal = stopped_by_random_signal;

  save_selected_frame (&inf_status->selected_frame_id,
                       &inf_status->selected_frame_level);

  return inf_status;
}

/* Restore inferior session state to INF_STATUS.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;

  if (target_has_stack ())
    {
      restore_selected_frame (inf_status->selected_frame_id,
                              inf_status->selected_frame_level);
    }

  delete inf_status;
}

void
discard_infcall_control_state (struct infcall_control_state *inf_status)
{
  if (inf_status->thread_control.step_resume_breakpoint)
    inf_status->thread_control.step_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  if (inf_status->thread_control.exception_resume_breakpoint)
    inf_status->thread_control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* See save_infcall_control_state for info on stop_bpstat.  */
  bpstat_clear (&inf_status->thread_control.stop_bpstat);

  delete inf_status;
}
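
/* Editor's illustrative sketch, not part of the original infrun.c: how
   a caller typically brackets an inferior function call with the
   control-state pair above (the real callers live in infcall.c, e.g.
   call_function_by_hand).  The function name and DO_CALL callback are
   hypothetical placeholders.  */

static void
example_call_with_control_state (void (*do_call) (void))
{
  infcall_control_state_up status = save_infcall_control_state ();

  do_call ();

  /* On the normal path, put back stepping state, the bpstat chain and
     the selected frame.  An error path that wants to keep the current
     state would call discard_infcall_control_state () instead.  */
  restore_infcall_control_state (status.release ());
}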

/* See infrun.h.  */

void
clear_exit_convenience_vars (void)
{
  clear_internalvar (lookup_internalvar ("_exitsignal"));
  clear_internalvar (lookup_internalvar ("_exitcode"));
}

/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (setting the direction errors out unless the target supports
   reverse execution).  */

enum exec_direction_kind execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
static const char *exec_direction = exec_forward;
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  NULL
};

static void
set_exec_direction_func (const char *args, int from_tty,
                         struct cmd_list_element *cmd)
{
  if (target_can_execute_reverse ())
    {
      if (!strcmp (exec_direction, exec_forward))
        execution_direction = EXEC_FORWARD;
      else if (!strcmp (exec_direction, exec_reverse))
        execution_direction = EXEC_REVERSE;
    }
  else
    {
      exec_direction = exec_forward;
      error (_("Target does not support this operation."));
    }
}

static void
show_exec_direction_func (struct ui_file *out, int from_tty,
                          struct cmd_list_element *cmd, const char *value)
{
  switch (execution_direction) {
  case EXEC_FORWARD:
    fprintf_filtered (out, _("Forward.\n"));
    break;
  case EXEC_REVERSE:
    fprintf_filtered (out, _("Reverse.\n"));
    break;
  default:
    internal_error (__FILE__, __LINE__,
                    _("bogus execution_direction value: %d"),
                    (int) execution_direction);
  }
}
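
/* Editor's illustrative sketch, not part of the original infrun.c: how
   code elsewhere consults the EXECUTION_DIRECTION setting maintained by
   the two functions above.  The helper name is a hypothetical
   placeholder.  */

static bool
example_stepping_backward ()
{
  return execution_direction == EXEC_REVERSE;
}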

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
                        struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Resuming the execution of threads "
                            "of all processes is %s.\n"), value);
}

/* Implementation of `siginfo' variable.  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  NULL,
  NULL
};

/* Callback for infrun's target events source.  This is marked when a
   thread has a pending status to process.  */

static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT);
}
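
/* Editor's illustrative sketch, not part of the original infrun.c: how
   the event source above is typically flagged so the event loop calls
   the handler.  This assumes the async-event API's
   mark_async_event_handler; the wrapper name is a hypothetical
   placeholder, and the token itself is created in _initialize_infrun
   below.  */

static void
example_mark_infrun_event ()
{
  mark_async_event_handler (infrun_async_inferior_event_token);
}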

#if GDB_SELF_TEST
namespace selftests
{

/* Verify that when two threads with the same ptid exist (from two different
   targets) and one of them changes ptid, we only update inferior_ptid if
   it is appropriate.  */

static void
infrun_thread_ptid_changed ()
{
  gdbarch *arch = current_inferior ()->gdbarch;

  /* The thread which inferior_ptid represents changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);
    target2.mock_inferior.next = &target1.mock_inferior;

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target1.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    gdb_assert (inferior_ptid == new_ptid);
  }

  /* A thread with the same ptid as inferior_ptid, but from another target,
     changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);
    target2.mock_inferior.next = &target1.mock_inferior;

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target2.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    gdb_assert (inferior_ptid == old_ptid);
  }
}

} /* namespace selftests */

#endif /* GDB_SELF_TEST */

void _initialize_infrun ();
void
_initialize_infrun ()
{
  struct cmd_list_element *c;

  /* Register extra event sources in the event loop.  */
  infrun_async_inferior_event_token
    = create_async_event_handler (infrun_async_inferior_event_handler, NULL,
                                  "infrun");

  add_info ("signals", info_signals_command, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", "signals", 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  if (!dbx_commands)
    stop_command = add_cmd ("stop", class_obscure,
                            not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  add_setshow_boolean_cmd
    ("infrun", class_maintenance, &debug_infrun,
     _("Set inferior debugging."),
     _("Show inferior debugging."),
     _("When non-zero, inferior specific debugging is enabled."),
     NULL, show_debug_infrun, &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
                           &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
                           set_non_stop,
                           show_non_stop,
                           &setlist,
                           &showlist);

  for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program.  Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
                            &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
                            set_stop_on_solib_events,
                            show_stop_on_solib_events,
                            &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
                        follow_fork_mode_kind_names,
                        &follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
                        NULL,
                        show_follow_fork_mode_string,
                        &setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
                        follow_exec_mode_names,
                        &follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
                        NULL,
                        show_follow_exec_mode_string,
                        &setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
                        scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off    == no locking (threads may preempt at any time)\n\
on     == full locking (no thread except the current thread may run)\n\
          This applies to both normal execution and replay mode.\n\
step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
          In this mode, other threads may run during other commands.\n\
          This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
                        set_schedlock_func, /* traps on target vector */
                        show_scheduler_mode,
                        &setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
                           NULL,
                           show_schedule_multiple,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function.  Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
                           NULL,
                           show_step_stop_if_no_debug,
                           &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
                                &can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
                                NULL,
                                show_can_use_displaced_stepping,
                                &setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
                        &exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
                        _("Show direction of execution (forward/reverse)."),
                        _("Tells gdb whether to execute forward or backward."),
                        set_exec_direction_func, show_exec_direction_func,
                        &setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
                           NULL, NULL, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
                           &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
                           &set_disable_randomization,
                           &show_disable_randomization,
                           &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed);
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested);
  gdb::observers::thread_exit.attach (infrun_thread_thread_exit);
  gdb::observers::inferior_exit.attach (infrun_inferior_exit);
  gdb::observers::inferior_execd.attach (infrun_inferior_execd);

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);

  add_setshow_boolean_cmd ("observer", no_class,
                           &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
                           set_observer_mode,
                           show_observer_mode,
                           &setlist,
                           &showlist);

#if GDB_SELF_TEST
  selftests::register_test ("infrun_thread_ptid_changed",
                            selftests::infrun_thread_ptid_changed);
#endif
}