infrun.c: add for_each_just_stopped_thread
[deliverable/binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "breakpoint.h"
28 #include "gdb_wait.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "cli/cli-script.h"
32 #include "target.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include <signal.h>
38 #include "inf-loop.h"
39 #include "regcache.h"
40 #include "value.h"
41 #include "observer.h"
42 #include "language.h"
43 #include "solib.h"
44 #include "main.h"
45 #include "dictionary.h"
46 #include "block.h"
47 #include "mi/mi-common.h"
48 #include "event-top.h"
49 #include "record.h"
50 #include "record-full.h"
51 #include "inline-frame.h"
52 #include "jit.h"
53 #include "tracepoint.h"
54 #include "continuations.h"
55 #include "interps.h"
56 #include "skip.h"
57 #include "probe.h"
58 #include "objfiles.h"
59 #include "completer.h"
60 #include "target-descriptions.h"
61 #include "target-dcache.h"
62 #include "terminal.h"
63
64 /* Prototypes for local functions */
65
66 static void signals_info (char *, int);
67
68 static void handle_command (char *, int);
69
70 static void sig_print_info (enum gdb_signal);
71
72 static void sig_print_header (void);
73
74 static void resume_cleanups (void *);
75
76 static int hook_stop_stub (void *);
77
78 static int restore_selected_frame (void *);
79
80 static int follow_fork (void);
81
82 static int follow_fork_inferior (int follow_child, int detach_fork);
83
84 static void follow_inferior_reset_breakpoints (void);
85
86 static void set_schedlock_func (char *args, int from_tty,
87 struct cmd_list_element *c);
88
89 static int currently_stepping (struct thread_info *tp);
90
91 static void xdb_handle_command (char *args, int from_tty);
92
93 void _initialize_infrun (void);
94
95 void nullify_last_target_wait_ptid (void);
96
97 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
98
99 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
100
101 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
102
103 /* When set, stop the 'step' command if we enter a function which has
104 no line number information. The normal behavior is that we step
105 over such function. */
106 int step_stop_if_no_debug = 0;
/* Implement 'show step-mode': report whether 'step' stops in
   functions without line number information.  */
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}
113
114 /* In asynchronous mode, but simulating synchronous execution. */
115
116 int sync_execution = 0;
117
118 /* proceed and normal_stop use this to notify the user when the
119 inferior stopped in a different thread than it had been running
120 in. */
121
122 static ptid_t previous_inferior_ptid;
123
124 /* If set (default for legacy reasons), when following a fork, GDB
125 will detach from one of the fork branches, child or parent.
126 Exactly which branch is detached depends on 'set follow-fork-mode'
127 setting. */
128
129 static int detach_fork = 1;
130
131 int debug_displaced = 0;
/* Implement 'show debug displaced'.  VALUE is the current setting,
   already rendered as a string by the command machinery.

   Note: the message previously read "Displace stepping"; the feature
   is "displaced stepping", so fix the grammar of the user-visible
   text.  */
static void
show_debug_displaced (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
}
138
139 unsigned int debug_infrun = 0;
/* Implement 'show debug infrun'.  */
static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}
146
147
148 /* Support for disabling address space randomization. */
149
150 int disable_randomization = 1;
151
/* Implement 'show disable-randomization'.  When the target cannot
   control address space randomization at all, say so instead of
   reporting the (meaningless) setting.  */
static void
show_disable_randomization (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  if (!target_supports_disable_randomization ())
    {
      fputs_filtered (_("Disabling randomization of debuggee's "
			"virtual address space is unsupported on\n"
			"this platform.\n"), file);
      return;
    }

  fprintf_filtered (file,
		    _("Disabling randomization of debuggee's "
		      "virtual address space is %s.\n"),
		    value);
}
166
/* Implement 'set disable-randomization'.  Reject the command outright
   on targets that cannot control address space randomization; the
   variable itself has already been updated by the command machinery,
   but it is never consulted on such targets.  */
static void
set_disable_randomization (char *args, int from_tty,
			   struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
	     "virtual address space is unsupported on\n"
	     "this platform."));
}
176
177 /* User interface for non-stop mode. */
178
179 int non_stop = 0;
180 static int non_stop_1 = 0;
181
/* Implement 'set non-stop'.  The mode cannot change while the
   inferior is executing, so in that case roll the staged value
   NON_STOP_1 back to the live NON_STOP and error out; otherwise
   commit the staged value.  */
static void
set_non_stop (char *args, int from_tty,
	      struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}
194
/* Implement 'show non-stop'.  */
static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in non-stop mode is %s.\n"),
		    value);
}
203
204 /* "Observer mode" is somewhat like a more extreme version of
205 non-stop, in which all GDB operations that might affect the
206 target's execution have been disabled. */
207
208 int observer_mode = 0;
209 static int observer_mode_1 = 0;
210
211 static void
212 set_observer_mode (char *args, int from_tty,
213 struct cmd_list_element *c)
214 {
215 if (target_has_execution)
216 {
217 observer_mode_1 = observer_mode;
218 error (_("Cannot change this setting while the inferior is running."));
219 }
220
221 observer_mode = observer_mode_1;
222
223 may_write_registers = !observer_mode;
224 may_write_memory = !observer_mode;
225 may_insert_breakpoints = !observer_mode;
226 may_insert_tracepoints = !observer_mode;
227 /* We can insert fast tracepoints in or out of observer mode,
228 but enable them if we're going into this mode. */
229 if (observer_mode)
230 may_insert_fast_tracepoints = 1;
231 may_stop = !observer_mode;
232 update_target_permissions ();
233
234 /* Going *into* observer mode we must force non-stop, then
235 going out we leave it that way. */
236 if (observer_mode)
237 {
238 pagination_enabled = 0;
239 non_stop = non_stop_1 = 1;
240 }
241
242 if (from_tty)
243 printf_filtered (_("Observer mode is now %s.\n"),
244 (observer_mode ? "on" : "off"));
245 }
246
/* Implement 'show observer'.  */
static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}
253
254 /* This updates the value of observer mode based on changes in
255 permissions. Note that we are deliberately ignoring the values of
256 may-write-registers and may-write-memory, since the user may have
257 reason to enable these during a session, for instance to turn on a
258 debugging-related global. */
259
260 void
261 update_observer_mode (void)
262 {
263 int newval;
264
265 newval = (!may_insert_breakpoints
266 && !may_insert_tracepoints
267 && may_insert_fast_tracepoints
268 && !may_stop
269 && non_stop);
270
271 /* Let the user know if things change. */
272 if (newval != observer_mode)
273 printf_filtered (_("Observer mode is now %s.\n"),
274 (newval ? "on" : "off"));
275
276 observer_mode = observer_mode_1 = newval;
277 }
278
279 /* Tables of how to react to signals; the user sets them. */
280
281 static unsigned char *signal_stop;
282 static unsigned char *signal_print;
283 static unsigned char *signal_program;
284
285 /* Table of signals that are registered with "catch signal". A
286 non-zero entry indicates that the signal is caught by some "catch
287 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
288 signals. */
289 static unsigned char *signal_catch;
290
291 /* Table of signals that the target may silently handle.
292 This is automatically determined from the flags above,
293 and simply cached here. */
294 static unsigned char *signal_pass;
295
/* Set FLAGS[signum] = 1 for every signal whose entry in SIGS is
   nonzero; other entries of FLAGS are left untouched.  NSIGS is the
   number of entries in both tables.  */
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

/* Clear FLAGS[signum] for every signal whose entry in SIGS is
   nonzero; the converse of SET_SIGS above.  */
#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)
311
/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  /* Hand the whole table (indexed by GDB signal number) to the
     target, so it knows which signals to pass to the program.  */
  target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
}
320
321 /* Value to pass to target_resume() to cause all threads to resume. */
322
323 #define RESUME_ALL minus_one_ptid
324
325 /* Command list pointer for the "stop" placeholder. */
326
327 static struct cmd_list_element *stop_command;
328
329 /* Function inferior was in as of last step command. */
330
331 static struct symbol *step_start_function;
332
333 /* Nonzero if we want to give control to the user when we're notified
334 of shared library events by the dynamic linker. */
335 int stop_on_solib_events;
336
/* Implement 'set stop-on-solib-events'.  Enable or disable optional
   shared library event breakpoints as appropriate when the above
   flag is changed.  */

static void
set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}
345
/* Implement 'show stop-on-solib-events'.  */
static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
		    value);
}
353
354 /* Nonzero means expecting a trace trap
355 and should stop the inferior and return silently when it happens. */
356
357 int stop_after_trap;
358
359 /* Save register contents here when executing a "finish" command or are
360 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
361 Thus this contains the return value from the called function (assuming
362 values are returned in a register). */
363
364 struct regcache *stop_registers;
365
366 /* Nonzero after stop if current stack frame should be printed. */
367
368 static int stop_print_frame;
369
370 /* This is a cached copy of the pid/waitstatus of the last event
371 returned by target_wait()/deprecated_target_wait_hook(). This
372 information is returned by get_last_target_status(). */
373 static ptid_t target_last_wait_ptid;
374 static struct target_waitstatus target_last_waitstatus;
375
376 static void context_switch (ptid_t ptid);
377
378 void init_thread_stepping_state (struct thread_info *tss);
379
380 static const char follow_fork_mode_child[] = "child";
381 static const char follow_fork_mode_parent[] = "parent";
382
383 static const char *const follow_fork_mode_kind_names[] = {
384 follow_fork_mode_child,
385 follow_fork_mode_parent,
386 NULL
387 };
388
389 static const char *follow_fork_mode_string = follow_fork_mode_parent;
/* Implement 'show follow-fork-mode'.  */
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Debugger response to a program "
		      "call of fork or vfork is \"%s\".\n"),
		    value);
}
399 \f
400
401 /* Handle changes to the inferior list based on the type of fork,
402 which process is being followed, and whether the other process
403 should be detached. On entry inferior_ptid must be the ptid of
404 the fork parent. At return inferior_ptid is the ptid of the
405 followed inferior. */
406
407 static int
408 follow_fork_inferior (int follow_child, int detach_fork)
409 {
410 int has_vforked;
411 int parent_pid, child_pid;
412
413 has_vforked = (inferior_thread ()->pending_follow.kind
414 == TARGET_WAITKIND_VFORKED);
415 parent_pid = ptid_get_lwp (inferior_ptid);
416 if (parent_pid == 0)
417 parent_pid = ptid_get_pid (inferior_ptid);
418 child_pid
419 = ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);
420
421 if (has_vforked
422 && !non_stop /* Non-stop always resumes both branches. */
423 && (!target_is_async_p () || sync_execution)
424 && !(follow_child || detach_fork || sched_multi))
425 {
426 /* The parent stays blocked inside the vfork syscall until the
427 child execs or exits. If we don't let the child run, then
428 the parent stays blocked. If we're telling the parent to run
429 in the foreground, the user will not be able to ctrl-c to get
430 back the terminal, effectively hanging the debug session. */
431 fprintf_filtered (gdb_stderr, _("\
432 Can not resume the parent process over vfork in the foreground while\n\
433 holding the child stopped. Try \"set detach-on-fork\" or \
434 \"set schedule-multiple\".\n"));
435 /* FIXME output string > 80 columns. */
436 return 1;
437 }
438
439 if (!follow_child)
440 {
441 /* Detach new forked process? */
442 if (detach_fork)
443 {
444 struct cleanup *old_chain;
445
446 /* Before detaching from the child, remove all breakpoints
447 from it. If we forked, then this has already been taken
448 care of by infrun.c. If we vforked however, any
449 breakpoint inserted in the parent is visible in the
450 child, even those added while stopped in a vfork
451 catchpoint. This will remove the breakpoints from the
452 parent also, but they'll be reinserted below. */
453 if (has_vforked)
454 {
455 /* Keep breakpoints list in sync. */
456 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
457 }
458
459 if (info_verbose || debug_infrun)
460 {
461 target_terminal_ours ();
462 fprintf_filtered (gdb_stdlog,
463 "Detaching after fork from "
464 "child process %d.\n",
465 child_pid);
466 }
467 }
468 else
469 {
470 struct inferior *parent_inf, *child_inf;
471 struct cleanup *old_chain;
472
473 /* Add process to GDB's tables. */
474 child_inf = add_inferior (child_pid);
475
476 parent_inf = current_inferior ();
477 child_inf->attach_flag = parent_inf->attach_flag;
478 copy_terminal_info (child_inf, parent_inf);
479 child_inf->gdbarch = parent_inf->gdbarch;
480 copy_inferior_target_desc_info (child_inf, parent_inf);
481
482 old_chain = save_inferior_ptid ();
483 save_current_program_space ();
484
485 inferior_ptid = ptid_build (child_pid, child_pid, 0);
486 add_thread (inferior_ptid);
487 child_inf->symfile_flags = SYMFILE_NO_READ;
488
489 /* If this is a vfork child, then the address-space is
490 shared with the parent. */
491 if (has_vforked)
492 {
493 child_inf->pspace = parent_inf->pspace;
494 child_inf->aspace = parent_inf->aspace;
495
496 /* The parent will be frozen until the child is done
497 with the shared region. Keep track of the
498 parent. */
499 child_inf->vfork_parent = parent_inf;
500 child_inf->pending_detach = 0;
501 parent_inf->vfork_child = child_inf;
502 parent_inf->pending_detach = 0;
503 }
504 else
505 {
506 child_inf->aspace = new_address_space ();
507 child_inf->pspace = add_program_space (child_inf->aspace);
508 child_inf->removable = 1;
509 set_current_program_space (child_inf->pspace);
510 clone_program_space (child_inf->pspace, parent_inf->pspace);
511
512 /* Let the shared library layer (e.g., solib-svr4) learn
513 about this new process, relocate the cloned exec, pull
514 in shared libraries, and install the solib event
515 breakpoint. If a "cloned-VM" event was propagated
516 better throughout the core, this wouldn't be
517 required. */
518 solib_create_inferior_hook (0);
519 }
520
521 do_cleanups (old_chain);
522 }
523
524 if (has_vforked)
525 {
526 struct inferior *parent_inf;
527
528 parent_inf = current_inferior ();
529
530 /* If we detached from the child, then we have to be careful
531 to not insert breakpoints in the parent until the child
532 is done with the shared memory region. However, if we're
533 staying attached to the child, then we can and should
534 insert breakpoints, so that we can debug it. A
535 subsequent child exec or exit is enough to know when does
536 the child stops using the parent's address space. */
537 parent_inf->waiting_for_vfork_done = detach_fork;
538 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
539 }
540 }
541 else
542 {
543 /* Follow the child. */
544 struct inferior *parent_inf, *child_inf;
545 struct program_space *parent_pspace;
546
547 if (info_verbose || debug_infrun)
548 {
549 target_terminal_ours ();
550 if (has_vforked)
551 fprintf_filtered (gdb_stdlog,
552 _("Attaching after process %d "
553 "vfork to child process %d.\n"),
554 parent_pid, child_pid);
555 else
556 fprintf_filtered (gdb_stdlog,
557 _("Attaching after process %d "
558 "fork to child process %d.\n"),
559 parent_pid, child_pid);
560 }
561
562 /* Add the new inferior first, so that the target_detach below
563 doesn't unpush the target. */
564
565 child_inf = add_inferior (child_pid);
566
567 parent_inf = current_inferior ();
568 child_inf->attach_flag = parent_inf->attach_flag;
569 copy_terminal_info (child_inf, parent_inf);
570 child_inf->gdbarch = parent_inf->gdbarch;
571 copy_inferior_target_desc_info (child_inf, parent_inf);
572
573 parent_pspace = parent_inf->pspace;
574
575 /* If we're vforking, we want to hold on to the parent until the
576 child exits or execs. At child exec or exit time we can
577 remove the old breakpoints from the parent and detach or
578 resume debugging it. Otherwise, detach the parent now; we'll
579 want to reuse it's program/address spaces, but we can't set
580 them to the child before removing breakpoints from the
581 parent, otherwise, the breakpoints module could decide to
582 remove breakpoints from the wrong process (since they'd be
583 assigned to the same address space). */
584
585 if (has_vforked)
586 {
587 gdb_assert (child_inf->vfork_parent == NULL);
588 gdb_assert (parent_inf->vfork_child == NULL);
589 child_inf->vfork_parent = parent_inf;
590 child_inf->pending_detach = 0;
591 parent_inf->vfork_child = child_inf;
592 parent_inf->pending_detach = detach_fork;
593 parent_inf->waiting_for_vfork_done = 0;
594 }
595 else if (detach_fork)
596 target_detach (NULL, 0);
597
598 /* Note that the detach above makes PARENT_INF dangling. */
599
600 /* Add the child thread to the appropriate lists, and switch to
601 this new thread, before cloning the program space, and
602 informing the solib layer about this new process. */
603
604 inferior_ptid = ptid_build (child_pid, child_pid, 0);
605 add_thread (inferior_ptid);
606
607 /* If this is a vfork child, then the address-space is shared
608 with the parent. If we detached from the parent, then we can
609 reuse the parent's program/address spaces. */
610 if (has_vforked || detach_fork)
611 {
612 child_inf->pspace = parent_pspace;
613 child_inf->aspace = child_inf->pspace->aspace;
614 }
615 else
616 {
617 child_inf->aspace = new_address_space ();
618 child_inf->pspace = add_program_space (child_inf->aspace);
619 child_inf->removable = 1;
620 child_inf->symfile_flags = SYMFILE_NO_READ;
621 set_current_program_space (child_inf->pspace);
622 clone_program_space (child_inf->pspace, parent_pspace);
623
624 /* Let the shared library layer (e.g., solib-svr4) learn
625 about this new process, relocate the cloned exec, pull in
626 shared libraries, and install the solib event breakpoint.
627 If a "cloned-VM" event was propagated better throughout
628 the core, this wouldn't be required. */
629 solib_create_inferior_hook (0);
630 }
631 }
632
633 return target_follow_fork (follow_child, detach_fork);
634 }
635
636 /* Tell the target to follow the fork we're stopped at. Returns true
637 if the inferior should be resumed; false, if the target for some
638 reason decided it's best not to resume. */
639
640 static int
641 follow_fork (void)
642 {
643 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
644 int should_resume = 1;
645 struct thread_info *tp;
646
647 /* Copy user stepping state to the new inferior thread. FIXME: the
648 followed fork child thread should have a copy of most of the
649 parent thread structure's run control related fields, not just these.
650 Initialized to avoid "may be used uninitialized" warnings from gcc. */
651 struct breakpoint *step_resume_breakpoint = NULL;
652 struct breakpoint *exception_resume_breakpoint = NULL;
653 CORE_ADDR step_range_start = 0;
654 CORE_ADDR step_range_end = 0;
655 struct frame_id step_frame_id = { 0 };
656 struct interp *command_interp = NULL;
657
658 if (!non_stop)
659 {
660 ptid_t wait_ptid;
661 struct target_waitstatus wait_status;
662
663 /* Get the last target status returned by target_wait(). */
664 get_last_target_status (&wait_ptid, &wait_status);
665
666 /* If not stopped at a fork event, then there's nothing else to
667 do. */
668 if (wait_status.kind != TARGET_WAITKIND_FORKED
669 && wait_status.kind != TARGET_WAITKIND_VFORKED)
670 return 1;
671
672 /* Check if we switched over from WAIT_PTID, since the event was
673 reported. */
674 if (!ptid_equal (wait_ptid, minus_one_ptid)
675 && !ptid_equal (inferior_ptid, wait_ptid))
676 {
677 /* We did. Switch back to WAIT_PTID thread, to tell the
678 target to follow it (in either direction). We'll
679 afterwards refuse to resume, and inform the user what
680 happened. */
681 switch_to_thread (wait_ptid);
682 should_resume = 0;
683 }
684 }
685
686 tp = inferior_thread ();
687
688 /* If there were any forks/vforks that were caught and are now to be
689 followed, then do so now. */
690 switch (tp->pending_follow.kind)
691 {
692 case TARGET_WAITKIND_FORKED:
693 case TARGET_WAITKIND_VFORKED:
694 {
695 ptid_t parent, child;
696
697 /* If the user did a next/step, etc, over a fork call,
698 preserve the stepping state in the fork child. */
699 if (follow_child && should_resume)
700 {
701 step_resume_breakpoint = clone_momentary_breakpoint
702 (tp->control.step_resume_breakpoint);
703 step_range_start = tp->control.step_range_start;
704 step_range_end = tp->control.step_range_end;
705 step_frame_id = tp->control.step_frame_id;
706 exception_resume_breakpoint
707 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
708 command_interp = tp->control.command_interp;
709
710 /* For now, delete the parent's sr breakpoint, otherwise,
711 parent/child sr breakpoints are considered duplicates,
712 and the child version will not be installed. Remove
713 this when the breakpoints module becomes aware of
714 inferiors and address spaces. */
715 delete_step_resume_breakpoint (tp);
716 tp->control.step_range_start = 0;
717 tp->control.step_range_end = 0;
718 tp->control.step_frame_id = null_frame_id;
719 delete_exception_resume_breakpoint (tp);
720 tp->control.command_interp = NULL;
721 }
722
723 parent = inferior_ptid;
724 child = tp->pending_follow.value.related_pid;
725
726 /* Set up inferior(s) as specified by the caller, and tell the
727 target to do whatever is necessary to follow either parent
728 or child. */
729 if (follow_fork_inferior (follow_child, detach_fork))
730 {
731 /* Target refused to follow, or there's some other reason
732 we shouldn't resume. */
733 should_resume = 0;
734 }
735 else
736 {
737 /* This pending follow fork event is now handled, one way
738 or another. The previous selected thread may be gone
739 from the lists by now, but if it is still around, need
740 to clear the pending follow request. */
741 tp = find_thread_ptid (parent);
742 if (tp)
743 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
744
745 /* This makes sure we don't try to apply the "Switched
746 over from WAIT_PID" logic above. */
747 nullify_last_target_wait_ptid ();
748
749 /* If we followed the child, switch to it... */
750 if (follow_child)
751 {
752 switch_to_thread (child);
753
754 /* ... and preserve the stepping state, in case the
755 user was stepping over the fork call. */
756 if (should_resume)
757 {
758 tp = inferior_thread ();
759 tp->control.step_resume_breakpoint
760 = step_resume_breakpoint;
761 tp->control.step_range_start = step_range_start;
762 tp->control.step_range_end = step_range_end;
763 tp->control.step_frame_id = step_frame_id;
764 tp->control.exception_resume_breakpoint
765 = exception_resume_breakpoint;
766 tp->control.command_interp = command_interp;
767 }
768 else
769 {
770 /* If we get here, it was because we're trying to
771 resume from a fork catchpoint, but, the user
772 has switched threads away from the thread that
773 forked. In that case, the resume command
774 issued is most likely not applicable to the
775 child, so just warn, and refuse to resume. */
776 warning (_("Not resuming: switched threads "
777 "before following fork child.\n"));
778 }
779
780 /* Reset breakpoints in the child as appropriate. */
781 follow_inferior_reset_breakpoints ();
782 }
783 else
784 switch_to_thread (parent);
785 }
786 }
787 break;
788 case TARGET_WAITKIND_SPURIOUS:
789 /* Nothing to follow. */
790 break;
791 default:
792 internal_error (__FILE__, __LINE__,
793 "Unexpected pending_follow.kind %d\n",
794 tp->pending_follow.kind);
795 break;
796 }
797
798 return should_resume;
799 }
800
801 static void
802 follow_inferior_reset_breakpoints (void)
803 {
804 struct thread_info *tp = inferior_thread ();
805
806 /* Was there a step_resume breakpoint? (There was if the user
807 did a "next" at the fork() call.) If so, explicitly reset its
808 thread number. Cloned step_resume breakpoints are disabled on
809 creation, so enable it here now that it is associated with the
810 correct thread.
811
812 step_resumes are a form of bp that are made to be per-thread.
813 Since we created the step_resume bp when the parent process
814 was being debugged, and now are switching to the child process,
815 from the breakpoint package's viewpoint, that's a switch of
816 "threads". We must update the bp's notion of which thread
817 it is for, or it'll be ignored when it triggers. */
818
819 if (tp->control.step_resume_breakpoint)
820 {
821 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
822 tp->control.step_resume_breakpoint->loc->enabled = 1;
823 }
824
825 /* Treat exception_resume breakpoints like step_resume breakpoints. */
826 if (tp->control.exception_resume_breakpoint)
827 {
828 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
829 tp->control.exception_resume_breakpoint->loc->enabled = 1;
830 }
831
832 /* Reinsert all breakpoints in the child. The user may have set
833 breakpoints after catching the fork, in which case those
834 were never set in the child, but only in the parent. This makes
835 sure the inserted breakpoints match the breakpoint list. */
836
837 breakpoint_re_set ();
838 insert_breakpoints ();
839 }
840
841 /* The child has exited or execed: resume threads of the parent the
842 user wanted to be executing. */
843
844 static int
845 proceed_after_vfork_done (struct thread_info *thread,
846 void *arg)
847 {
848 int pid = * (int *) arg;
849
850 if (ptid_get_pid (thread->ptid) == pid
851 && is_running (thread->ptid)
852 && !is_executing (thread->ptid)
853 && !thread->stop_requested
854 && thread->suspend.stop_signal == GDB_SIGNAL_0)
855 {
856 if (debug_infrun)
857 fprintf_unfiltered (gdb_stdlog,
858 "infrun: resuming vfork parent thread %s\n",
859 target_pid_to_str (thread->ptid));
860
861 switch_to_thread (thread->ptid);
862 clear_proceed_status (0);
863 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
864 }
865
866 return 0;
867 }
868
/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  EXEC is nonzero for a child
   exec, zero for a child exit.  Either event ends the period during
   which the vfork parent and child share an address space.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      /* Pid of the parent to resume afterwards, or -1 for "none".  */
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  If the user wanted to
	 detach from the parent, now is the time.  */

      if (inf->vfork_parent->pending_detach)
	{
	  struct thread_info *tp;
	  struct cleanup *old_chain;
	  struct program_space *pspace;
	  struct address_space *aspace;

	  /* follow-fork child, detach-on-fork on.  */

	  inf->vfork_parent->pending_detach = 0;

	  if (!exec)
	    {
	      /* If we're handling a child exit, then inferior_ptid
		 points at the inferior's pid, not to a thread.  */
	      old_chain = save_inferior_ptid ();
	      save_current_program_space ();
	      save_current_inferior ();
	    }
	  else
	    old_chain = save_current_space_and_thread ();

	  /* We're letting loose of the parent.  */
	  tp = any_live_thread_of_process (inf->vfork_parent->pid);
	  switch_to_thread (tp->ptid);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module being unaware of this, would
	     likely chose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  aspace = inf->aspace;
	  inf->aspace = NULL;
	  inf->pspace = NULL;

	  if (debug_infrun || info_verbose)
	    {
	      target_terminal_ours ();

	      if (exec)
		fprintf_filtered (gdb_stdlog,
				  "Detaching vfork parent process "
				  "%d after child exec.\n",
				  inf->vfork_parent->pid);
	      else
		fprintf_filtered (gdb_stdlog,
				  "Detaching vfork parent process "
				  "%d after child exit.\n",
				  inf->vfork_parent->pid);
	    }

	  target_detach (NULL, 0);

	  /* Put it back.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;

	  do_cleanups (old_chain);
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = add_program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = 1;
	  set_current_program_space (inf->pspace);

	  resume_parent = inf->vfork_parent->pid;

	  /* Break the bonds.  */
	  inf->vfork_parent->vfork_child = NULL;
	}
      else
	{
	  struct cleanup *old_chain;
	  struct program_space *pspace;

	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  /* Switch to null_ptid, so that clone_program_space doesn't want
	     to read the selected frame of a dead process.  */
	  old_chain = save_inferior_ptid ();
	  inferior_ptid = null_ptid;

	  /* This inferior is dead, so avoid giving the breakpoints
	     module the option to write through to it (cloning a
	     program space resets breakpoints).  */
	  inf->aspace = NULL;
	  inf->pspace = NULL;
	  pspace = add_program_space (maybe_new_address_space ());
	  set_current_program_space (pspace);
	  inf->removable = 1;
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (pspace, inf->vfork_parent->pspace);
	  inf->pspace = pspace;
	  inf->aspace = pspace->aspace;

	  /* Put back inferior_ptid.  We'll continue mourning this
	     inferior.  */
	  do_cleanups (old_chain);

	  resume_parent = inf->vfork_parent->pid;
	  /* Break the bonds.  */
	  inf->vfork_parent->vfork_child = NULL;
	}

      inf->vfork_parent = NULL;

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  struct cleanup *old_chain = make_cleanup_restore_current_thread ();

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resuming vfork parent process %d\n",
				resume_parent);

	  iterate_over_threads (proceed_after_vfork_done, &resume_parent);

	  do_cleanups (old_chain);
	}
    }
}
1028
/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";

/* NULL-terminated list of the valid "follow-exec-mode" values.  */
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

/* The current "follow-exec-mode" setting.  Always points at one of
   the follow_exec_mode_* strings above, so it is compared by pointer
   identity (see follow_exec).  */
static const char *follow_exec_mode_string = follow_exec_mode_same;
/* Implementation of "show follow-exec-mode": print the current
   setting (VALUE) to FILE.  */

static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  const char *mode = value;

  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), mode);
}
1047
/* EXECD_PATHNAME is assumed to be non-NULL.  Handle the inferior
   having exec'd: flush per-image state (breakpoints, shared
   libraries, symbols, target description) and load the new
   executable's image in its place.  PID is the ptid the exec event
   was reported for.  */

static void
follow_exec (ptid_t pid, char *execd_pathname)
{
  struct thread_info *th = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  update_breakpoints_after_exec ();

  /* If there was one, it's gone now.  We cannot truly step-to-next
     statement through an exec().  */
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     already stopped --- if debugging in non-stop mode, it's possible
     the user had the main thread held stopped in the previous image
     --- release it now.  This is the same behavior as step-over-exec
     with scheduler-locking on in all-stop mode.  */
  th->stop_requested = 0;

  /* What is this a.out's name?  */
  printf_unfiltered (_("%s is executing new program: %s\n"),
		     target_pid_to_str (inferior_ptid),
		     execd_pathname);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  gdb_flush (gdb_stdout);

  breakpoint_init_inferior (inf_execd);

  /* If a sysroot is in effect, look for the new executable under it
     rather than at the target-reported path.  The concatenated name
     is stack-allocated, which is fine: it is only used within this
     call (exec_file_attach/symbol_file_add below).  */
  if (gdb_sysroot && *gdb_sysroot)
    {
      char *name = alloca (strlen (gdb_sysroot)
			   + strlen (execd_pathname)
			   + 1);

      strcpy (name, gdb_sysroot);
      strcat (name, execd_pathname);
      execd_pathname = name;
    }

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  /* Pointer-identity comparison: the setting always points at one of
     the follow_exec_mode_* strings.  */
  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      struct program_space *pspace;

      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      inf = add_inferior (current_inferior ()->pid);
      pspace = add_program_space (maybe_new_address_space ());
      inf->pspace = pspace;
      inf->aspace = pspace->aspace;

      exit_inferior_num_silent (current_inferior ()->num);

      set_current_inferior (inf);
      set_current_program_space (pspace);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
	 E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
	 old description; we'll read a new one below.  No need to do
	 this on "follow-exec-mode new", as the old inferior stays
	 around (its description is later cleared/refetched on
	 restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* That a.out is now the one to use.  */
  exec_file_attach (execd_pathname, 0);

  /* SYMFILE_DEFER_BP_RESET is used as the proper displacement for PIE
     (Position Independent Executable) main symbol file will get applied by
     solib_create_inferior_hook below.  breakpoint_re_set would fail to insert
     the breakpoints with the zero displacement.  */

  symbol_file_add (execd_pathname,
		   (inf->symfile_flags
		    | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
		   NULL, 0);

  if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
    set_initial_language ();

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  solib_create_inferior_hook (0);

  jit_inferior_created_hook ();

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}
1197
/* Non-zero if we are just simulating a single-step (with software
   single-step breakpoints).  This is needed because we cannot remove
   the breakpoints in the inferior process until after the `wait' in
   `wait_for_inferior'.  */
static int singlestep_breakpoints_inserted_p = 0;

/* The thread we inserted single-step breakpoints for.  */
static ptid_t singlestep_ptid;

/* PC when we started this single-step.  */
static CORE_ADDR singlestep_pc;
1208
/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  struct address_space *aspace;
  CORE_ADDR address;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;
1250
1251 /* Record the address of the breakpoint/instruction we're currently
1252 stepping over. */
1253
1254 static void
1255 set_step_over_info (struct address_space *aspace, CORE_ADDR address,
1256 int nonsteppable_watchpoint_p)
1257 {
1258 step_over_info.aspace = aspace;
1259 step_over_info.address = address;
1260 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1261 }
1262
1263 /* Called when we're not longer stepping over a breakpoint / an
1264 instruction, so all breakpoints are free to be (re)inserted. */
1265
1266 static void
1267 clear_step_over_info (void)
1268 {
1269 step_over_info.aspace = NULL;
1270 step_over_info.address = 0;
1271 step_over_info.nonsteppable_watchpoint_p = 0;
1272 }
1273
1274 /* See infrun.h. */
1275
1276 int
1277 stepping_past_instruction_at (struct address_space *aspace,
1278 CORE_ADDR address)
1279 {
1280 return (step_over_info.aspace != NULL
1281 && breakpoint_address_match (aspace, address,
1282 step_over_info.aspace,
1283 step_over_info.address));
1284 }
1285
1286 /* See infrun.h. */
1287
1288 int
1289 stepping_past_nonsteppable_watchpoint (void)
1290 {
1291 return step_over_info.nonsteppable_watchpoint_p;
1292 }
1293
1294 /* Returns true if step-over info is valid. */
1295
1296 static int
1297 step_over_info_valid_p (void)
1298 {
1299 return (step_over_info.aspace != NULL
1300 || stepping_past_nonsteppable_watchpoint ());
1301 }
1302
1303 \f
1304 /* Displaced stepping. */
1305
1306 /* In non-stop debugging mode, we must take special care to manage
1307 breakpoints properly; in particular, the traditional strategy for
1308 stepping a thread past a breakpoint it has hit is unsuitable.
1309 'Displaced stepping' is a tactic for stepping one thread past a
1310 breakpoint it has hit while ensuring that other threads running
1311 concurrently will hit the breakpoint as they should.
1312
1313 The traditional way to step a thread T off a breakpoint in a
1314 multi-threaded program in all-stop mode is as follows:
1315
1316 a0) Initially, all threads are stopped, and breakpoints are not
1317 inserted.
1318 a1) We single-step T, leaving breakpoints uninserted.
1319 a2) We insert breakpoints, and resume all threads.
1320
1321 In non-stop debugging, however, this strategy is unsuitable: we
1322 don't want to have to stop all threads in the system in order to
1323 continue or step T past a breakpoint. Instead, we use displaced
1324 stepping:
1325
1326 n0) Initially, T is stopped, other threads are running, and
1327 breakpoints are inserted.
1328 n1) We copy the instruction "under" the breakpoint to a separate
1329 location, outside the main code stream, making any adjustments
1330 to the instruction, register, and memory state as directed by
1331 T's architecture.
1332 n2) We single-step T over the instruction at its new location.
1333 n3) We adjust the resulting register and memory state as directed
1334 by T's architecture. This includes resetting T's PC to point
1335 back into the main instruction stream.
1336 n4) We resume T.
1337
1338 This approach depends on the following gdbarch methods:
1339
1340 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1341 indicate where to copy the instruction, and how much space must
1342 be reserved there. We use these in step n1.
1343
   - gdbarch_displaced_step_copy_insn copies an instruction to a new
1345 address, and makes any necessary adjustments to the instruction,
1346 register contents, and memory. We use this in step n1.
1347
1348 - gdbarch_displaced_step_fixup adjusts registers and memory after
     we have successfully single-stepped the instruction, to yield the
1350 same effect the instruction would have had if we had executed it
1351 at its original address. We use this in step n3.
1352
1353 - gdbarch_displaced_step_free_closure provides cleanup.
1354
1355 The gdbarch_displaced_step_copy_insn and
1356 gdbarch_displaced_step_fixup functions must be written so that
1357 copying an instruction with gdbarch_displaced_step_copy_insn,
1358 single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
1360 thread's memory and registers as stepping the instruction in place
1361 would have. Exactly which responsibilities fall to the copy and
1362 which fall to the fixup is up to the author of those functions.
1363
1364 See the comments in gdbarch.sh for details.
1365
1366 Note that displaced stepping and software single-step cannot
1367 currently be used in combination, although with some care I think
1368 they could be made to. Software single-step works by placing
1369 breakpoints on all possible subsequent instructions; if the
1370 displaced instruction is a PC-relative jump, those breakpoints
1371 could fall in very strange places --- on pages that aren't
1372 executable, or at addresses that are not proper instruction
1373 boundaries. (We do generally let other threads run while we wait
1374 to hit the software single-step breakpoint, and they might
1375 encounter such a corrupted instruction.) One way to work around
1376 this would be to have gdbarch_displaced_step_copy_insn fully
1377 simulate the effect of PC-relative instructions (and return NULL)
1378 on architectures that use software single-stepping.
1379
1380 In non-stop mode, we can have independent and simultaneous step
1381 requests, so more than one thread may need to simultaneously step
1382 over a breakpoint. The current implementation assumes there is
1383 only one scratch space per process. In this case, we have to
1384 serialize access to the scratch space. If thread A wants to step
1385 over a breakpoint, but we are currently waiting for some other
1386 thread to complete a displaced step, we leave thread A stopped and
1387 place it in the displaced_step_request_queue. Whenever a displaced
1388 step finishes, we pick the next thread in the queue and start a new
1389 displaced step operation on it. See displaced_step_prepare and
1390 displaced_step_fixup for details. */
1391
/* An entry in the queue of threads waiting for their turn to use the
   per-inferior displaced-stepping scratch area.  */

struct displaced_step_request
{
  /* The thread whose displaced step was deferred.  */
  ptid_t ptid;

  /* Next request in the singly-linked queue, or NULL.  */
  struct displaced_step_request *next;
};
1397
/* Per-inferior displaced stepping state.  */
struct displaced_step_inferior_state
{
  /* Pointer to next in linked list.  */
  struct displaced_step_inferior_state *next;

  /* The process this displaced step state refers to.  */
  int pid;

  /* A queue of pending displaced stepping requests.  One entry per
     thread that needs to do a displaced step.  */
  struct displaced_step_request *step_request_queue;

  /* If this is not null_ptid, this is the thread carrying out a
     displaced single-step in process PID.  This thread's state will
     require fixing up once it has completed its step.  */
  ptid_t step_ptid;

  /* The architecture the thread had when we stepped it.  */
  struct gdbarch *step_gdbarch;

  /* The closure provided by gdbarch_displaced_step_copy_insn, to be
     used for post-step cleanup.  */
  struct displaced_step_closure *step_closure;

  /* The address of the original instruction, and the copy we
     made.  */
  CORE_ADDR step_original, step_copy;

  /* Saved contents of copy area.  */
  gdb_byte *step_saved_copy;
};

/* The list of states of processes involved in displaced stepping
   presently.  */
static struct displaced_step_inferior_state *displaced_step_inferior_states;
1434
1435 /* Get the displaced stepping state of process PID. */
1436
1437 static struct displaced_step_inferior_state *
1438 get_displaced_stepping_state (int pid)
1439 {
1440 struct displaced_step_inferior_state *state;
1441
1442 for (state = displaced_step_inferior_states;
1443 state != NULL;
1444 state = state->next)
1445 if (state->pid == pid)
1446 return state;
1447
1448 return NULL;
1449 }
1450
1451 /* Add a new displaced stepping state for process PID to the displaced
1452 stepping state list, or return a pointer to an already existing
1453 entry, if it already exists. Never returns NULL. */
1454
1455 static struct displaced_step_inferior_state *
1456 add_displaced_stepping_state (int pid)
1457 {
1458 struct displaced_step_inferior_state *state;
1459
1460 for (state = displaced_step_inferior_states;
1461 state != NULL;
1462 state = state->next)
1463 if (state->pid == pid)
1464 return state;
1465
1466 state = xcalloc (1, sizeof (*state));
1467 state->pid = pid;
1468 state->next = displaced_step_inferior_states;
1469 displaced_step_inferior_states = state;
1470
1471 return state;
1472 }
1473
1474 /* If inferior is in displaced stepping, and ADDR equals to starting address
1475 of copy area, return corresponding displaced_step_closure. Otherwise,
1476 return NULL. */
1477
1478 struct displaced_step_closure*
1479 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1480 {
1481 struct displaced_step_inferior_state *displaced
1482 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1483
1484 /* If checking the mode of displaced instruction in copy area. */
1485 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1486 && (displaced->step_copy == addr))
1487 return displaced->step_closure;
1488
1489 return NULL;
1490 }
1491
1492 /* Remove the displaced stepping state of process PID. */
1493
1494 static void
1495 remove_displaced_stepping_state (int pid)
1496 {
1497 struct displaced_step_inferior_state *it, **prev_next_p;
1498
1499 gdb_assert (pid != 0);
1500
1501 it = displaced_step_inferior_states;
1502 prev_next_p = &displaced_step_inferior_states;
1503 while (it)
1504 {
1505 if (it->pid == pid)
1506 {
1507 *prev_next_p = it->next;
1508 xfree (it);
1509 return;
1510 }
1511
1512 prev_next_p = &it->next;
1513 it = *prev_next_p;
1514 }
1515 }
1516
1517 static void
1518 infrun_inferior_exit (struct inferior *inf)
1519 {
1520 remove_displaced_stepping_state (inf->pid);
1521 }
1522
/* If ON, and the architecture supports it, GDB will use displaced
   stepping to step over breakpoints.  If OFF, or if the architecture
   doesn't support it, GDB will instead use the traditional
   hold-and-step approach.  If AUTO (which is the default), GDB will
   decide which technique to use to step over breakpoints depending on
   which of all-stop or non-stop mode is active --- displaced stepping
   in non-stop mode; hold-and-step in all-stop mode.  (The setting is
   resolved by use_displaced_stepping, below.)  */

static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1532
1533 static void
1534 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1535 struct cmd_list_element *c,
1536 const char *value)
1537 {
1538 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1539 fprintf_filtered (file,
1540 _("Debugger's willingness to use displaced stepping "
1541 "to step over breakpoints is %s (currently %s).\n"),
1542 value, non_stop ? "on" : "off");
1543 else
1544 fprintf_filtered (file,
1545 _("Debugger's willingness to use displaced stepping "
1546 "to step over breakpoints is %s.\n"), value);
1547 }
1548
1549 /* Return non-zero if displaced stepping can/should be used to step
1550 over breakpoints. */
1551
1552 static int
1553 use_displaced_stepping (struct gdbarch *gdbarch)
1554 {
1555 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1556 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1557 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1558 && find_record_target () == NULL);
1559 }
1560
1561 /* Clean out any stray displaced stepping state. */
1562 static void
1563 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1564 {
1565 /* Indicate that there is no cleanup pending. */
1566 displaced->step_ptid = null_ptid;
1567
1568 if (displaced->step_closure)
1569 {
1570 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1571 displaced->step_closure);
1572 displaced->step_closure = NULL;
1573 }
1574 }
1575
/* Cleanup-callback wrapper around displaced_step_clear.  ARG is the
   struct displaced_step_inferior_state to clear.  */

static void
displaced_step_clear_cleanup (void *arg)
{
  displaced_step_clear ((struct displaced_step_inferior_state *) arg);
}
1583
1584 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1585 void
1586 displaced_step_dump_bytes (struct ui_file *file,
1587 const gdb_byte *buf,
1588 size_t len)
1589 {
1590 int i;
1591
1592 for (i = 0; i < len; i++)
1593 fprintf_unfiltered (file, "%02x ", buf[i]);
1594 fputs_unfiltered ("\n", file);
1595 }
1596
/* Prepare to single-step, using displaced stepping.

   Note that we cannot use displaced stepping when we have a signal to
   deliver.  If we have a signal to deliver and an instruction to step
   over, then after the step, there will be no indication from the
   target whether the thread entered a signal handler or ignored the
   signal and stepped over the instruction successfully --- both cases
   result in a simple SIGTRAP.  In the first case we mustn't do a
   fixup, and in the second case we must --- but we can't tell which.
   Comments in the code for 'random signals' in handle_inferior_event
   explain how we handle this case instead.

   Returns 1 if preparing was successful -- this thread is going to be
   stepped now; or 0 if displaced stepping this thread got queued.  */
static int
displaced_step_prepare (ptid_t ptid)
{
  struct cleanup *old_cleanups, *ignore_cleanups;
  struct thread_info *tp = find_thread_ptid (ptid);
  struct regcache *regcache = get_thread_regcache (ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original, copy;
  ULONGEST len;
  struct displaced_step_closure *closure;
  struct displaced_step_inferior_state *displaced;
  int status;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We have to displaced step one thread at a time, as we only have
     access to a single scratch space per inferior.  */

  displaced = add_displaced_stepping_state (ptid_get_pid (ptid));

  if (!ptid_equal (displaced->step_ptid, null_ptid))
    {
      /* Already waiting for a displaced step to finish.  Defer this
	 request and place in queue.  */
      struct displaced_step_request *req, *new_req;

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: defering step of %s\n",
			    target_pid_to_str (ptid));

      /* Append at the tail of the queue, so deferred requests are
	 serviced in FIFO order.  */
      new_req = xmalloc (sizeof (*new_req));
      new_req->ptid = ptid;
      new_req->next = NULL;

      if (displaced->step_request_queue)
	{
	  for (req = displaced->step_request_queue;
	       req && req->next;
	       req = req->next)
	    ;
	  req->next = new_req;
	}
      else
	displaced->step_request_queue = new_req;

      return 0;
    }
  else
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: stepping %s now\n",
			    target_pid_to_str (ptid));
    }

  displaced_step_clear (displaced);

  /* Temporarily switch inferior_ptid to the stepped thread for the
     memory/register accesses below.  */
  old_cleanups = save_inferior_ptid ();
  inferior_ptid = ptid;

  original = regcache_read_pc (regcache);

  copy = gdbarch_displaced_step_location (gdbarch);
  len = gdbarch_max_insn_length (gdbarch);

  /* Save the original contents of the copy area.  */
  displaced->step_saved_copy = xmalloc (len);
  ignore_cleanups = make_cleanup (free_current_contents,
				  &displaced->step_saved_copy);
  status = target_read_memory (copy, displaced->step_saved_copy, len);
  if (status != 0)
    throw_error (MEMORY_ERROR,
		 _("Error accessing memory address %s (%s) for "
		   "displaced-stepping scratch space."),
		 paddress (gdbarch, copy), safe_strerror (status));
  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
			  paddress (gdbarch, copy));
      displaced_step_dump_bytes (gdb_stdlog,
				 displaced->step_saved_copy,
				 len);
    };

  closure = gdbarch_displaced_step_copy_insn (gdbarch,
					      original, copy, regcache);

  /* We don't support the fully-simulated case at present.  */
  gdb_assert (closure);

  /* Save the information we need to fix things up if the step
     succeeds.  */
  displaced->step_ptid = ptid;
  displaced->step_gdbarch = gdbarch;
  displaced->step_closure = closure;
  displaced->step_original = original;
  displaced->step_copy = copy;

  make_cleanup (displaced_step_clear_cleanup, displaced);

  /* Resume execution at the copy.  */
  regcache_write_pc (regcache, copy);

  /* The saved bytes must outlive this function (they are written back
     by displaced_step_restore), so drop the interim cleanup that
     would free them on error.  */
  discard_cleanups (ignore_cleanups);

  do_cleanups (old_cleanups);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
			paddress (gdbarch, copy));

  return 1;
}
1733
1734 static void
1735 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1736 const gdb_byte *myaddr, int len)
1737 {
1738 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1739
1740 inferior_ptid = ptid;
1741 write_memory (memaddr, myaddr, len);
1742 do_cleanups (ptid_cleanup);
1743 }
1744
1745 /* Restore the contents of the copy area for thread PTID. */
1746
1747 static void
1748 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1749 ptid_t ptid)
1750 {
1751 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1752
1753 write_memory_ptid (ptid, displaced->step_copy,
1754 displaced->step_saved_copy, len);
1755 if (debug_displaced)
1756 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1757 target_pid_to_str (ptid),
1758 paddress (displaced->step_gdbarch,
1759 displaced->step_copy));
1760 }
1761
/* If EVENT_PTID is the thread we were displaced-stepping, restore the
   scratch area contents, fix up the thread's state according to
   SIGNAL (the signal the step stopped with), and start the next
   queued displaced-step request, if any.  */

static void
displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
{
  struct cleanup *old_cleanups;
  struct displaced_step_inferior_state *displaced
    = get_displaced_stepping_state (ptid_get_pid (event_ptid));

  /* Was any thread of this process doing a displaced step?  */
  if (displaced == NULL)
    return;

  /* Was this event for the pid we displaced?  */
  if (ptid_equal (displaced->step_ptid, null_ptid)
      || ! ptid_equal (displaced->step_ptid, event_ptid))
    return;

  old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);

  displaced_step_restore (displaced, displaced->step_ptid);

  /* Did the instruction complete successfully?  */
  if (signal == GDB_SIGNAL_TRAP)
    {
      /* Fix up the resulting state.  */
      gdbarch_displaced_step_fixup (displaced->step_gdbarch,
				    displaced->step_closure,
				    displaced->step_original,
				    displaced->step_copy,
				    get_thread_regcache (displaced->step_ptid));
    }
  else
    {
      /* Since the instruction didn't complete, all we can do is
	 relocate the PC.  */
      struct regcache *regcache = get_thread_regcache (event_ptid);
      CORE_ADDR pc = regcache_read_pc (regcache);

      /* Translate the scratch-area PC back into the equivalent
	 address in the original instruction stream.  */
      pc = displaced->step_original + (pc - displaced->step_copy);
      regcache_write_pc (regcache, pc);
    }

  do_cleanups (old_cleanups);

  displaced->step_ptid = null_ptid;

  /* Are there any pending displaced stepping requests?  If so, run
     one now.  Leave the state object around, since we're likely to
     need it again soon.  */
  while (displaced->step_request_queue)
    {
      struct displaced_step_request *head;
      ptid_t ptid;
      struct regcache *regcache;
      struct gdbarch *gdbarch;
      CORE_ADDR actual_pc;
      struct address_space *aspace;

      /* Pop the head request off the queue.  */
      head = displaced->step_request_queue;
      ptid = head->ptid;
      displaced->step_request_queue = head->next;
      xfree (head);

      context_switch (ptid);

      regcache = get_thread_regcache (ptid);
      actual_pc = regcache_read_pc (regcache);
      aspace = get_regcache_aspace (regcache);

      if (breakpoint_here_p (aspace, actual_pc))
	{
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: stepping queued %s now\n",
				target_pid_to_str (ptid));

	  displaced_step_prepare (ptid);

	  gdbarch = get_regcache_arch (regcache);

	  if (debug_displaced)
	    {
	      CORE_ADDR actual_pc = regcache_read_pc (regcache);
	      gdb_byte buf[4];

	      fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
				  paddress (gdbarch, actual_pc));
	      read_memory (actual_pc, buf, sizeof (buf));
	      displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
	    }

	  /* Resume with a hardware single-step if the architecture
	     says the (displaced) instruction needs one.  */
	  if (gdbarch_displaced_step_hw_singlestep (gdbarch,
						    displaced->step_closure))
	    target_resume (ptid, 1, GDB_SIGNAL_0);
	  else
	    target_resume (ptid, 0, GDB_SIGNAL_0);

	  /* Done, we're stepping a thread.  */
	  break;
	}
      else
	{
	  int step;
	  struct thread_info *tp = inferior_thread ();

	  /* The breakpoint we were sitting under has since been
	     removed.  */
	  tp->control.trap_expected = 0;

	  /* Go back to what we were trying to do.  */
	  step = currently_stepping (tp);

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: breakpoint is gone: %s, step(%d)\n",
				target_pid_to_str (tp->ptid), step);

	  target_resume (ptid, step, GDB_SIGNAL_0);
	  tp->suspend.stop_signal = GDB_SIGNAL_0;

	  /* This request was discarded.  See if there's any other
	     thread waiting for its turn.  */
	}
    }
}
1886
1887 /* Update global variables holding ptids to hold NEW_PTID if they were
1888 holding OLD_PTID. */
1889 static void
1890 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1891 {
1892 struct displaced_step_request *it;
1893 struct displaced_step_inferior_state *displaced;
1894
1895 if (ptid_equal (inferior_ptid, old_ptid))
1896 inferior_ptid = new_ptid;
1897
1898 if (ptid_equal (singlestep_ptid, old_ptid))
1899 singlestep_ptid = new_ptid;
1900
1901 for (displaced = displaced_step_inferior_states;
1902 displaced;
1903 displaced = displaced->next)
1904 {
1905 if (ptid_equal (displaced->step_ptid, old_ptid))
1906 displaced->step_ptid = new_ptid;
1907
1908 for (it = displaced->step_request_queue; it; it = it->next)
1909 if (ptid_equal (it->ptid, old_ptid))
1910 it->ptid = new_ptid;
1911 }
1912 }
1913
1914 \f
1915 /* Resuming. */
1916
/* Things to clean up if we QUIT out of resume ().  Registered as a
   cleanup; IGNORE is unused.  Brings GDB's run-control state back to
   "stopped" via normal_stop.  */
static void
resume_cleanups (void *ignore)
{
  normal_stop ();
}
1923
/* Possible values for the "scheduler-locking" setting.  Note that
   SCHEDULER_MODE below is compared against these by pointer identity
   (add_setshow_enum_cmd stores the matching array element), not by
   string contents.  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  NULL
};
/* The current scheduler-locking mode; one of the schedlock_* arrays
   above.  */
static const char *scheduler_mode = schedlock_off;
/* Implementation of "show scheduler-locking": report the current mode
   VALUE on FILE.  */
static void
show_scheduler_mode (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered
    (file,
     _("Mode for locking scheduler during execution is \"%s\".\n"),
     value);
}
1943
1944 static void
1945 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1946 {
1947 if (!target_can_lock_scheduler)
1948 {
1949 scheduler_mode = schedlock_off;
1950 error (_("Target '%s' cannot support this command."), target_shortname);
1951 }
1952 }
1953
1954 /* True if execution commands resume all threads of all processes by
1955 default; otherwise, resume only threads of the current inferior
1956 process. */
1957 int sched_multi = 0;
1958
1959 /* Try to setup for software single stepping over the specified location.
1960 Return 1 if target_resume() should use hardware single step.
1961
1962 GDBARCH the current gdbarch.
1963 PC the location to step over. */
1964
1965 static int
1966 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1967 {
1968 int hw_step = 1;
1969
1970 if (execution_direction == EXEC_FORWARD
1971 && gdbarch_software_single_step_p (gdbarch)
1972 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1973 {
1974 hw_step = 0;
1975 /* Do not pull these breakpoints until after a `wait' in
1976 `wait_for_inferior'. */
1977 singlestep_breakpoints_inserted_p = 1;
1978 singlestep_ptid = inferior_ptid;
1979 singlestep_pc = pc;
1980 }
1981 return hw_step;
1982 }
1983
1984 ptid_t
1985 user_visible_resume_ptid (int step)
1986 {
1987 /* By default, resume all threads of all processes. */
1988 ptid_t resume_ptid = RESUME_ALL;
1989
1990 /* Maybe resume only all threads of the current process. */
1991 if (!sched_multi && target_supports_multi_process ())
1992 {
1993 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1994 }
1995
1996 /* Maybe resume a single thread after all. */
1997 if (non_stop)
1998 {
1999 /* With non-stop mode on, threads are always handled
2000 individually. */
2001 resume_ptid = inferior_ptid;
2002 }
2003 else if ((scheduler_mode == schedlock_on)
2004 || (scheduler_mode == schedlock_step && step))
2005 {
2006 /* User-settable 'scheduler' mode requires solo thread resume. */
2007 resume_ptid = inferior_ptid;
2008 }
2009
2010 /* We may actually resume fewer threads at first, e.g., if a thread
2011 is stopped at a breakpoint that needs stepping-off, but that
2012 should not be visible to the user/frontend, and neither should
2013 the frontend/user be allowed to proceed any of the threads that
2014 happen to be stopped for internal run control handling, if a
2015 previous command wanted them resumed. */
2016 return resume_ptid;
2017 }
2018
/* Resume the inferior, but allow a QUIT.  This is useful if the user
   wants to interrupt some lengthy single-stepping operation
   (for child processes, the SIGINT goes to the inferior, and so
   we get a SIGINT random_signal, but for remote debugging and perhaps
   other targets, that's not true).

   STEP nonzero if we should step (zero to continue instead).
   SIG is the signal to give the inferior (zero for none).  */
void
resume (int step, enum gdb_signal sig)
{
  struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct thread_info *tp = inferior_thread ();
  CORE_ADDR pc = regcache_read_pc (regcache);
  struct address_space *aspace = get_regcache_aspace (regcache);
  ptid_t resume_ptid;
  /* From here on, this represents the caller's step vs continue
     request, while STEP represents what we'll actually request the
     target to do.  STEP can decay from a step to a continue, if e.g.,
     we need to implement single-stepping with breakpoints (software
     single-step).  When deciding whether "set scheduler-locking step"
     applies, it's the caller's intention that counts.  */
  const int entry_step = step;

  QUIT;

  if (current_inferior ()->waiting_for_vfork_done)
    {
      /* Don't try to single-step a vfork parent that is waiting for
	 the child to get out of the shared memory region (by exec'ing
	 or exiting).  This is particularly important on software
	 single-step archs, as the child process would trip on the
	 software single step breakpoint inserted for the parent
	 process.  Since the parent will not actually execute any
	 instruction until the child is out of the shared region (such
	 are vfork's semantics), it is safe to simply continue it.
	 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
	 the parent, and tell it to `keep_going', which automatically
	 re-sets it stepping.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: resume : clear step\n");
      step = 0;
    }

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: resume (step=%d, signal=%s), "
			"trap_expected=%d, current thread [%s] at %s\n",
			step, gdb_signal_to_symbol_string (sig),
			tp->control.trap_expected,
			target_pid_to_str (inferior_ptid),
			paddress (gdbarch, pc));

  /* Normally, by the time we reach `resume', the breakpoints are either
     removed or inserted, as appropriate.  The exception is if we're sitting
     at a permanent breakpoint; we need to step over it, but permanent
     breakpoints can't be removed.  So we have to test for it here.  */
  if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
    {
      if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
	gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
      else
	error (_("\
The program is stopped at a permanent breakpoint, but GDB does not know\n\
how to step past a permanent breakpoint on this architecture. Try using\n\
a command like `return' or `jump' to continue execution."));
    }

  /* If we have a breakpoint to step over, make sure to do a single
     step only.  Same if we have software watchpoints.  */
  if (tp->control.trap_expected || bpstat_should_step ())
    tp->control.may_range_step = 0;

  /* If enabled, step over breakpoints by executing a copy of the
     instruction at a different address.

     We can't use displaced stepping when we have a signal to deliver;
     the comments for displaced_step_prepare explain why.  The
     comments in the handle_inferior event for dealing with 'random
     signals' explain what we do instead.

     We can't use displaced stepping when we are waiting for vfork_done
     event, displaced stepping breaks the vfork child similarly as single
     step software breakpoint.  */
  if (use_displaced_stepping (gdbarch)
      && (tp->control.trap_expected
	  || (step && gdbarch_software_single_step_p (gdbarch)))
      && sig == GDB_SIGNAL_0
      && !current_inferior ()->waiting_for_vfork_done)
    {
      struct displaced_step_inferior_state *displaced;

      if (!displaced_step_prepare (inferior_ptid))
	{
	  /* Got placed in displaced stepping queue.  Will be resumed
	     later when all the currently queued displaced stepping
	     requests finish.  The thread is not executing at this
	     point, and the call to set_executing will be made later.
	     But we need to call set_running here, since from the
	     user/frontend's point of view, threads were set running.
	     Unless we're calling an inferior function, as in that
	     case we pretend the inferior doesn't run at all.  */
	  if (!tp->control.in_infcall)
	    set_running (user_visible_resume_ptid (entry_step), 1);
	  discard_cleanups (old_cleanups);
	  return;
	}

      /* Update pc to reflect the new address from which we will execute
	 instructions due to displaced stepping.  */
      pc = regcache_read_pc (get_thread_regcache (inferior_ptid));

      displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
      step = gdbarch_displaced_step_hw_singlestep (gdbarch,
						   displaced->step_closure);
    }

  /* Do we need to do it the hard way, w/temp breakpoints?  */
  else if (step)
    step = maybe_software_singlestep (gdbarch, pc);

  /* Currently, our software single-step implementation leads to different
     results than hardware single-stepping in one situation: when stepping
     into delivering a signal which has an associated signal handler,
     hardware single-step will stop at the first instruction of the handler,
     while software single-step will simply skip execution of the handler.

     For now, this difference in behavior is accepted since there is no
     easy way to actually implement single-stepping into a signal handler
     without kernel support.

     However, there is one scenario where this difference leads to follow-on
     problems: if we're stepping off a breakpoint by removing all breakpoints
     and then single-stepping.  In this case, the software single-step
     behavior means that even if there is a *breakpoint* in the signal
     handler, GDB still would not stop.

     Fortunately, we can at least fix this particular issue.  We detect
     here the case where we are about to deliver a signal while software
     single-stepping with breakpoints removed.  In this situation, we
     revert the decisions to remove all breakpoints and insert single-
     step breakpoints, and instead we install a step-resume breakpoint
     at the current address, deliver the signal without stepping, and
     once we arrive back at the step-resume breakpoint, actually step
     over the breakpoint we originally wanted to step over.  */
  if (singlestep_breakpoints_inserted_p
      && sig != GDB_SIGNAL_0
      && step_over_info_valid_p ())
    {
      /* If we have nested signals or a pending signal is delivered
	 immediately after a handler returns, we might already have
	 a step-resume breakpoint set on the earlier handler.  We cannot
	 set another step-resume breakpoint; just continue on until the
	 original breakpoint is hit.  */
      if (tp->control.step_resume_breakpoint == NULL)
	{
	  insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	  tp->step_after_step_resume_breakpoint = 1;
	}

      remove_single_step_breakpoints ();
      singlestep_breakpoints_inserted_p = 0;

      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_breakpoints ();
    }

  /* If STEP is set, it's a request to use hardware stepping
     facilities.  But in that case, we should never
     use singlestep breakpoint.  */
  gdb_assert (!(singlestep_breakpoints_inserted_p && step));

  /* Decide the set of threads to ask the target to resume.  Start
     by assuming everything will be resumed, then narrow the set
     by applying increasingly restricting conditions.  */
  resume_ptid = user_visible_resume_ptid (entry_step);

  /* Even if RESUME_PTID is a wildcard, and we end up resuming less
     (e.g., we might need to step over a breakpoint), from the
     user/frontend's point of view, all threads in RESUME_PTID are now
     running.  Unless we're calling an inferior function, as in that
     case we pretend the inferior doesn't run at all.  */
  if (!tp->control.in_infcall)
    set_running (resume_ptid, 1);

  /* Maybe resume a single thread after all.  */
  if ((step || singlestep_breakpoints_inserted_p)
      && tp->control.trap_expected)
    {
      /* We're allowing a thread to run past a breakpoint it has
	 hit, by single-stepping the thread with the breakpoint
	 removed.  In which case, we need to single-step only this
	 thread, and keep others stopped, as they can miss this
	 breakpoint if allowed to run.  */
      resume_ptid = inferior_ptid;
    }

  if (gdbarch_cannot_step_breakpoint (gdbarch))
    {
      /* Most targets can step a breakpoint instruction, thus
	 executing it normally.  But if this one cannot, just
	 continue and we will hit it anyway.  */
      if (step && breakpoint_inserted_here_p (aspace, pc))
	step = 0;
    }

  if (debug_displaced
      && use_displaced_stepping (gdbarch)
      && tp->control.trap_expected)
    {
      struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
      struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
      CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
      gdb_byte buf[4];

      fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
			  paddress (resume_gdbarch, actual_pc));
      read_memory (actual_pc, buf, sizeof (buf));
      displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
    }

  if (tp->control.may_range_step)
    {
      /* If we're resuming a thread with the PC out of the step
	 range, then we're doing some nested/finer run control
	 operation, like stepping the thread out of the dynamic
	 linker or the displaced stepping scratch pad.  We
	 shouldn't have allowed a range step then.  */
      gdb_assert (pc_in_thread_step_range (pc, tp));
    }

  /* Install inferior's terminal modes.  */
  target_terminal_inferior ();

  /* Avoid confusing the next resume, if the next stop/resume
     happens to apply to another thread.  */
  tp->suspend.stop_signal = GDB_SIGNAL_0;

  /* Advise target which signals may be handled silently.  If we have
     removed breakpoints because we are stepping over one (in any
     thread), we need to receive all signals to avoid accidentally
     skipping a breakpoint during execution of a signal handler.  */
  if (step_over_info_valid_p ())
    target_pass_signals (0, NULL);
  else
    target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);

  target_resume (resume_ptid, step, sig);

  discard_cleanups (old_cleanups);
}
2275 \f
2276 /* Proceeding. */
2277
/* Clear out all variables saying what to do when inferior is continued.
   First do this, then set the ones you want, then call `proceed'.  */

/* Reset the per-thread run-control state of TP so a subsequent
   `proceed' starts from a clean slate.  */
static void
clear_proceed_status_thread (struct thread_info *tp)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: clear_proceed_status_thread (%s)\n",
			target_pid_to_str (tp->ptid));

  /* If this signal should not be seen by program, give it zero.
     Used for debugging signals.  */
  if (!signal_pass_state (tp->suspend.stop_signal))
    tp->suspend.stop_signal = GDB_SIGNAL_0;

  /* Forget any pending step-over and stepping-range state.  */
  tp->control.trap_expected = 0;
  tp->control.step_range_start = 0;
  tp->control.step_range_end = 0;
  tp->control.may_range_step = 0;
  tp->control.step_frame_id = null_frame_id;
  tp->control.step_stack_frame_id = null_frame_id;
  tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
  tp->stop_requested = 0;

  tp->control.stop_step = 0;

  tp->control.proceed_to_finish = 0;

  /* Forget which interpreter issued the previous execution command.  */
  tp->control.command_interp = NULL;

  /* Discard any remaining commands or status from previous stop.  */
  bpstat_clear (&tp->control.stop_bpstat);
}
2312
2313 void
2314 clear_proceed_status (int step)
2315 {
2316 if (!non_stop)
2317 {
2318 struct thread_info *tp;
2319 ptid_t resume_ptid;
2320
2321 resume_ptid = user_visible_resume_ptid (step);
2322
2323 /* In all-stop mode, delete the per-thread status of all threads
2324 we're about to resume, implicitly and explicitly. */
2325 ALL_NON_EXITED_THREADS (tp)
2326 {
2327 if (!ptid_match (tp->ptid, resume_ptid))
2328 continue;
2329 clear_proceed_status_thread (tp);
2330 }
2331 }
2332
2333 if (!ptid_equal (inferior_ptid, null_ptid))
2334 {
2335 struct inferior *inferior;
2336
2337 if (non_stop)
2338 {
2339 /* If in non-stop mode, only delete the per-thread status of
2340 the current thread. */
2341 clear_proceed_status_thread (inferior_thread ());
2342 }
2343
2344 inferior = current_inferior ();
2345 inferior->control.stop_soon = NO_STOP_QUIETLY;
2346 }
2347
2348 stop_after_trap = 0;
2349
2350 clear_step_over_info ();
2351
2352 observer_notify_about_to_proceed ();
2353
2354 if (stop_registers)
2355 {
2356 regcache_xfree (stop_registers);
2357 stop_registers = NULL;
2358 }
2359 }
2360
2361 /* Returns true if TP is still stopped at a breakpoint that needs
2362 stepping-over in order to make progress. If the breakpoint is gone
2363 meanwhile, we can skip the whole step-over dance. */
2364
2365 static int
2366 thread_still_needs_step_over (struct thread_info *tp)
2367 {
2368 if (tp->stepping_over_breakpoint)
2369 {
2370 struct regcache *regcache = get_thread_regcache (tp->ptid);
2371
2372 if (breakpoint_here_p (get_regcache_aspace (regcache),
2373 regcache_read_pc (regcache)))
2374 return 1;
2375
2376 tp->stepping_over_breakpoint = 0;
2377 }
2378
2379 return 0;
2380 }
2381
2382 /* Returns true if scheduler locking applies. STEP indicates whether
2383 we're about to do a step/next-like command to a thread. */
2384
2385 static int
2386 schedlock_applies (int step)
2387 {
2388 return (scheduler_mode == schedlock_on
2389 || (scheduler_mode == schedlock_step
2390 && step));
2391 }
2392
2393 /* Look a thread other than EXCEPT that has previously reported a
2394 breakpoint event, and thus needs a step-over in order to make
2395 progress. Returns NULL is none is found. STEP indicates whether
2396 we're about to step the current thread, in order to decide whether
2397 "set scheduler-locking step" applies. */
2398
2399 static struct thread_info *
2400 find_thread_needs_step_over (int step, struct thread_info *except)
2401 {
2402 struct thread_info *tp, *current;
2403
2404 /* With non-stop mode on, threads are always handled individually. */
2405 gdb_assert (! non_stop);
2406
2407 current = inferior_thread ();
2408
2409 /* If scheduler locking applies, we can avoid iterating over all
2410 threads. */
2411 if (schedlock_applies (step))
2412 {
2413 if (except != current
2414 && thread_still_needs_step_over (current))
2415 return current;
2416
2417 return NULL;
2418 }
2419
2420 ALL_NON_EXITED_THREADS (tp)
2421 {
2422 /* Ignore the EXCEPT thread. */
2423 if (tp == except)
2424 continue;
2425 /* Ignore threads of processes we're not resuming. */
2426 if (!sched_multi
2427 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2428 continue;
2429
2430 if (thread_still_needs_step_over (tp))
2431 return tp;
2432 }
2433
2434 return NULL;
2435 }
2436
/* Basic routine for continuing the program in various fashions.

   ADDR is the address to resume at, or -1 for resume where stopped.
   SIGGNAL is the signal to give it, or 0 for none,
   or -1 for act according to how it stopped.
   STEP is nonzero if should trap after one instruction.
   -1 means return after that and print nothing.
   You should probably set various step_... variables
   before calling here, if you are stepping.

   You should call clear_proceed_status before calling proceed.  */

void
proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  struct thread_info *tp;
  CORE_ADDR pc;
  struct address_space *aspace;

  /* If we're stopped at a fork/vfork, follow the branch set by the
     "set follow-fork-mode" command; otherwise, we'll just proceed
     resuming the current thread.  */
  if (!follow_fork ())
    {
      /* The target for some reason decided not to resume.  */
      normal_stop ();
      if (target_can_async_p ())
	inferior_event_handler (INF_EXEC_COMPLETE, NULL);
      return;
    }

  /* We'll update this if & when we switch to a new thread.  */
  previous_inferior_ptid = inferior_ptid;

  regcache = get_current_regcache ();
  gdbarch = get_regcache_arch (regcache);
  aspace = get_regcache_aspace (regcache);
  pc = regcache_read_pc (regcache);
  tp = inferior_thread ();

  if (step > 0)
    step_start_function = find_pc_function (pc);
  if (step < 0)
    stop_after_trap = 1;

  /* Fill in with reasonable starting values.  */
  init_thread_stepping_state (tp);

  if (addr == (CORE_ADDR) -1)
    {
      if (pc == stop_pc && breakpoint_here_p (aspace, pc)
	  && execution_direction != EXEC_REVERSE)
	/* There is a breakpoint at the address we will resume at,
	   step one instruction before inserting breakpoints so that
	   we do not stop right away (and report a second hit at this
	   breakpoint).

	   Note, we don't do this in reverse, because we won't
	   actually be executing the breakpoint insn anyway.
	   We'll be (un-)executing the previous instruction.  */
	tp->stepping_over_breakpoint = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
	       && gdbarch_single_step_through_delay (gdbarch,
						     get_current_frame ()))
	/* We stepped onto an instruction that needs to be stepped
	   again before re-inserting the breakpoint, do so.  */
	tp->stepping_over_breakpoint = 1;
    }
  else
    {
      regcache_write_pc (regcache, addr);
    }

  if (siggnal != GDB_SIGNAL_DEFAULT)
    tp->suspend.stop_signal = siggnal;

  /* Record the interpreter that issued the execution command that
     caused this thread to resume.  If the top level interpreter is
     MI/async, and the execution command was a CLI command
     (next/step/etc.), we'll want to print stop event output to the MI
     console channel (the stepped-to line, etc.), as if the user
     entered the execution command on a real GDB console.  */
  inferior_thread ()->control.command_interp = command_interp ();

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: proceed (addr=%s, signal=%s, step=%d)\n",
			paddress (gdbarch, addr),
			gdb_signal_to_symbol_string (siggnal), step);

  if (non_stop)
    /* In non-stop, each thread is handled individually.  The context
       must already be set to the right thread here.  */
    ;
  else
    {
      struct thread_info *step_over;

      /* In a multi-threaded task we may select another thread and
	 then continue or step.

	 But if the old thread was stopped at a breakpoint, it will
	 immediately cause another breakpoint stop without any
	 execution (i.e. it will report a breakpoint hit incorrectly).
	 So we must step over it first.

	 Look for a thread other than the current (TP) that reported a
	 breakpoint hit and hasn't been resumed yet since.  */
      step_over = find_thread_needs_step_over (step, tp);
      if (step_over != NULL)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: need to step-over [%s] first\n",
				target_pid_to_str (step_over->ptid));

	  /* Store the prev_pc for the stepping thread too, needed by
	     switch_back_to_stepped_thread.  */
	  tp->prev_pc = regcache_read_pc (get_current_regcache ());
	  switch_to_thread (step_over->ptid);
	  tp = step_over;
	}
    }

  /* If we need to step over a breakpoint, and we're not using
     displaced stepping to do so, insert all breakpoints (watchpoints,
     etc.) but the one we're stepping over, step one instruction, and
     then re-insert the breakpoint when that step is finished.  */
  if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
    {
      struct regcache *regcache = get_current_regcache ();

      set_step_over_info (get_regcache_aspace (regcache),
			  regcache_read_pc (regcache), 0);
    }
  else
    clear_step_over_info ();

  insert_breakpoints ();

  tp->control.trap_expected = tp->stepping_over_breakpoint;

  annotate_starting ();

  /* Make sure that output from GDB appears before output from the
     inferior.  */
  gdb_flush (gdb_stdout);

  /* Refresh prev_pc value just prior to resuming.  This used to be
     done in stop_waiting, however, setting prev_pc there did not handle
     scenarios such as inferior function calls or returning from
     a function via the return command.  In those cases, the prev_pc
     value was not set properly for subsequent commands.  The prev_pc value
     is used to initialize the starting line number in the ecs.  With an
     invalid value, the gdb next command ends up stopping at the position
     represented by the next line table entry past our start position.
     On platforms that generate one line table entry per line, this
     is not a problem.  However, on the ia64, the compiler generates
     extraneous line table entries that do not increase the line number.
     When we issue the gdb next command on the ia64 after an inferior call
     or a return command, we often end up a few instructions forward, still
     within the original line we started.

     An attempt was made to refresh the prev_pc at the same time the
     execution_control_state is initialized (for instance, just before
     waiting for an inferior event).  But this approach did not work
     because of platforms that use ptrace, where the pc register cannot
     be read unless the inferior is stopped.  At that point, we are not
     guaranteed the inferior is stopped and so the regcache_read_pc() call
     can fail.  Setting the prev_pc value here ensures the value is updated
     correctly when the inferior is stopped.  */
  tp->prev_pc = regcache_read_pc (get_current_regcache ());

  /* Resume inferior.  */
  resume (tp->control.trap_expected || step || bpstat_should_step (),
	  tp->suspend.stop_signal);

  /* Wait for it to stop (if not standalone)
     and in any case decode why it stopped, and act accordingly.  */
  /* Do this only if we are not using the event loop, or if the target
     does not support asynchronous execution.  */
  if (!target_can_async_p ())
    {
      wait_for_inferior ();
      normal_stop ();
    }
}
2626 \f
2627
/* Start remote-debugging of a machine over a serial link.  FROM_TTY
   is forwarded to post_create_inferior for its messages.  Blocks
   until the target reports a stop, then performs post-connection
   bookkeeping and a normal_stop.  */

void
start_remote (int from_tty)
{
  struct inferior *inferior;

  /* Stops reported during the initial connection are expected; have
     them handled quietly.  */
  inferior = current_inferior ();
  inferior->control.stop_soon = STOP_QUIETLY_REMOTE;

  /* Always go on waiting for the target, regardless of the mode.  */
  /* FIXME: cagney/1999-09-23: At present it isn't possible to
     indicate to wait_for_inferior that a target should timeout if
     nothing is returned (instead of just blocking).  Because of this,
     targets expecting an immediate response need to, internally, set
     things up so that the target_wait() is forced to eventually
     timeout.  */
  /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
     differentiate to its caller what the state of the target is after
     the initial open has been performed.  Here we're assuming that
     the target has stopped.  It should be possible to eventually have
     target_open() return to the caller an indication that the target
     is currently running and GDB state should be set to the same as
     for an async run.  */
  wait_for_inferior ();

  /* Now that the inferior has stopped, do any bookkeeping like
     loading shared libraries.  We want to do this before normal_stop,
     so that the displayed frame is up to date.  */
  post_create_inferior (&current_target, from_tty);

  normal_stop ();
}
2661
/* Initialize static vars when a new inferior begins.  */

void
init_wait_for_inferior (void)
{
  /* These are meaningless until the first time through wait_for_inferior.  */

  breakpoint_init_inferior (inf_starting);

  clear_proceed_status (0);

  target_last_wait_ptid = minus_one_ptid;

  previous_inferior_ptid = inferior_ptid;

  /* Discard any skipped inlined frames.  */
  clear_inline_frame_state (minus_one_ptid);

  /* Forget software single-step state left over from any previous
     inferior.  */
  singlestep_ptid = null_ptid;
  singlestep_pc = 0;
}
2683
2684 \f
/* This enum encodes possible reasons for doing a target_wait, so that
   wfi can call target_wait in one place.  (Ultimately the call will be
   moved out of the infinite loop entirely.) */

enum infwait_states
{
  infwait_normal_state,		/* Default wait handling.  */
  infwait_step_watch_state,	/* NOTE(review): presumably waiting after
				   single-stepping for a watchpoint check
				   -- confirm against users of this enum.  */
  infwait_nonstep_watch_state	/* NOTE(review): presumably the non-step
				   variant of the above -- confirm.  */
};

/* Current inferior wait state.  */
static enum infwait_states infwait_state;
2698
/* Data to be passed around while handling an event.  This data is
   discarded between events.  */
struct execution_control_state
{
  /* The ptid the event was reported for.  */
  ptid_t ptid;
  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  /* The reported wait status.  */
  struct target_waitstatus ws;
  /* Nonzero once the stop_func_* fields below have been filled in.  */
  int stop_func_filled_in;
  /* Bounds and name of the function the inferior stopped in, when
     determined.  */
  CORE_ADDR stop_func_start;
  CORE_ADDR stop_func_end;
  const char *stop_func_name;
  /* Nonzero if the event handler decided we should keep waiting for
     more events rather than stopping.  */
  int wait_some_more;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint;
};
2721
2722 static void handle_inferior_event (struct execution_control_state *ecs);
2723
2724 static void handle_step_into_function (struct gdbarch *gdbarch,
2725 struct execution_control_state *ecs);
2726 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2727 struct execution_control_state *ecs);
2728 static void handle_signal_stop (struct execution_control_state *ecs);
2729 static void check_exception_resume (struct execution_control_state *,
2730 struct frame_info *);
2731
2732 static void end_stepping_range (struct execution_control_state *ecs);
2733 static void stop_waiting (struct execution_control_state *ecs);
2734 static void prepare_to_wait (struct execution_control_state *ecs);
2735 static void keep_going (struct execution_control_state *ecs);
2736 static void process_event_stop_test (struct execution_control_state *ecs);
2737 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2738
2739 /* Callback for iterate over threads. If the thread is stopped, but
2740 the user/frontend doesn't know about that yet, go through
2741 normal_stop, as if the thread had just stopped now. ARG points at
2742 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2743 ptid_is_pid(PTID) is true, applies to all threads of the process
2744 pointed at by PTID. Otherwise, apply only to the thread pointed by
2745 PTID. */
2746
static int
infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
{
  /* ARG is the ptid filter passed to iterate_over_threads; see the
     comment above for its matching semantics.  */
  ptid_t ptid = * (ptid_t *) arg;

  /* Act only if INFO matches the filter AND the thread is in the
     "stopped but frontend still thinks it's running" window:
     is_running means the user-visible state is running, while
     !is_executing means the target has actually stopped it.  */
  if ((ptid_equal (info->ptid, ptid)
       || ptid_equal (minus_one_ptid, ptid)
       || (ptid_is_pid (ptid)
	   && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
      && is_running (info->ptid)
      && !is_executing (info->ptid))
    {
      struct cleanup *old_chain;
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;

      memset (ecs, 0, sizeof (*ecs));

      /* handle_inferior_event may switch threads; restore the user's
	 selection afterwards.  */
      old_chain = make_cleanup_restore_current_thread ();

      overlay_cache_invalid = 1;
      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      /* Go through handle_inferior_event/normal_stop, so we always
	 have consistent output as if the stop event had been
	 reported.  Synthesize a plain "stopped, no signal" event for
	 this thread.  */
      ecs->ptid = info->ptid;
      ecs->event_thread = find_thread_ptid (info->ptid);
      ecs->ws.kind = TARGET_WAITKIND_STOPPED;
      ecs->ws.value.sig = GDB_SIGNAL_0;

      handle_inferior_event (ecs);

      if (!ecs->wait_some_more)
	{
	  struct thread_info *tp;

	  normal_stop ();

	  /* Finish off the continuations.  The second argument
	     presumably flags an interrupted/forced finish — TODO
	     confirm against the continuations API.  */
	  tp = inferior_thread ();
	  do_all_intermediate_continuations_thread (tp, 1);
	  do_all_continuations_thread (tp, 1);
	}

      do_cleanups (old_chain);
    }

  /* Return 0 so iterate_over_threads keeps visiting all threads.  */
  return 0;
}
2801
2802 /* This function is attached as a "thread_stop_requested" observer.
2803 Cleanup local state that assumed the PTID was to be resumed, and
2804 report the stop to the frontend. */
2805
2806 static void
2807 infrun_thread_stop_requested (ptid_t ptid)
2808 {
2809 struct displaced_step_inferior_state *displaced;
2810
2811 /* PTID was requested to stop. Remove it from the displaced
2812 stepping queue, so we don't try to resume it automatically. */
2813
2814 for (displaced = displaced_step_inferior_states;
2815 displaced;
2816 displaced = displaced->next)
2817 {
2818 struct displaced_step_request *it, **prev_next_p;
2819
2820 it = displaced->step_request_queue;
2821 prev_next_p = &displaced->step_request_queue;
2822 while (it)
2823 {
2824 if (ptid_match (it->ptid, ptid))
2825 {
2826 *prev_next_p = it->next;
2827 it->next = NULL;
2828 xfree (it);
2829 }
2830 else
2831 {
2832 prev_next_p = &it->next;
2833 }
2834
2835 it = *prev_next_p;
2836 }
2837 }
2838
2839 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2840 }
2841
2842 static void
2843 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2844 {
2845 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2846 nullify_last_target_wait_ptid ();
2847 }
2848
/* Delete the step resume, single-step and longjmp/exception resume
   breakpoints of TP.  NOTE(review): only the step resume and
   exception resume breakpoints are deleted below; no single-step
   breakpoint is removed here — confirm whether that is handled
   elsewhere or the comment overstates.  */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
}
2858
2859 /* If the target still has execution, call FUNC for each thread that
2860 just stopped. In all-stop, that's all the non-exited threads; in
2861 non-stop, that's the current thread, only. */
2862
2863 typedef void (*for_each_just_stopped_thread_callback_func)
2864 (struct thread_info *tp);
2865
2866 static void
2867 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
2868 {
2869 if (!target_has_execution || ptid_equal (inferior_ptid, null_ptid))
2870 return;
2871
2872 if (non_stop)
2873 {
2874 /* If in non-stop mode, only the current thread stopped. */
2875 func (inferior_thread ());
2876 }
2877 else
2878 {
2879 struct thread_info *tp;
2880
2881 /* In all-stop mode, all threads have stopped. */
2882 ALL_NON_EXITED_THREADS (tp)
2883 {
2884 func (tp);
2885 }
2886 }
2887 }
2888
/* Delete the step resume and longjmp/exception resume breakpoints of
   the threads that just stopped: all non-exited threads in all-stop
   mode, only the current thread in non-stop mode.  */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}
2897
/* A cleanup wrapper.  ARG is ignored; the (void *) parameter exists
   only to satisfy the make_cleanup callback signature.  */

static void
delete_just_stopped_threads_infrun_breakpoints_cleanup (void *arg)
{
  delete_just_stopped_threads_infrun_breakpoints ();
}
2905
2906 /* Pretty print the results of target_wait, for debugging purposes. */
2907
2908 static void
2909 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2910 const struct target_waitstatus *ws)
2911 {
2912 char *status_string = target_waitstatus_to_string (ws);
2913 struct ui_file *tmp_stream = mem_fileopen ();
2914 char *text;
2915
2916 /* The text is split over several lines because it was getting too long.
2917 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2918 output as a unit; we want only one timestamp printed if debug_timestamp
2919 is set. */
2920
2921 fprintf_unfiltered (tmp_stream,
2922 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
2923 if (ptid_get_pid (waiton_ptid) != -1)
2924 fprintf_unfiltered (tmp_stream,
2925 " [%s]", target_pid_to_str (waiton_ptid));
2926 fprintf_unfiltered (tmp_stream, ", status) =\n");
2927 fprintf_unfiltered (tmp_stream,
2928 "infrun: %d [%s],\n",
2929 ptid_get_pid (result_ptid),
2930 target_pid_to_str (result_ptid));
2931 fprintf_unfiltered (tmp_stream,
2932 "infrun: %s\n",
2933 status_string);
2934
2935 text = ui_file_xstrdup (tmp_stream, NULL);
2936
2937 /* This uses %s in part to handle %'s in the text, but also to avoid
2938 a gcc error: the format attribute requires a string literal. */
2939 fprintf_unfiltered (gdb_stdlog, "%s", text);
2940
2941 xfree (status_string);
2942 xfree (text);
2943 ui_file_delete (tmp_stream);
2944 }
2945
/* Prepare and stabilize the inferior for detaching it.  E.g.,
   detaching while a thread is displaced stepping is a recipe for
   crashing it, as nothing would readjust the PC out of the scratch
   pad.  */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  ptid_t pid_ptid = pid_to_ptid (inf->pid);
  struct cleanup *old_chain_1;
  struct displaced_step_inferior_state *displaced;

  displaced = get_displaced_stepping_state (inf->pid);

  /* Is any thread of this process displaced stepping?  If not,
     there's nothing else to do.  */
  if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
    return;

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"displaced-stepping in-process while detaching");

  /* Flag the inferior as detaching for the duration of this
     function; the cleanup restores the previous value.  */
  old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
  inf->detaching = 1;

  /* Pump events until no thread of this process is displaced
     stepping anymore (step_ptid goes back to null_ptid).  */
  while (!ptid_equal (displaced->step_ptid, null_ptid))
    {
      struct cleanup *old_chain_2;
      struct execution_control_state ecss;
      struct execution_control_state *ecs;

      ecs = &ecss;
      memset (ecs, 0, sizeof (*ecs));

      overlay_cache_invalid = 1;
      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      /* Wait only for events of this process (PID_PTID), not of any
	 other inferior.  */
      if (deprecated_target_wait_hook)
	ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
      else
	ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);

      if (debug_infrun)
	print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);

      /* If an error happens while handling the event, propagate GDB's
	 knowledge of the executing state to the frontend/user running
	 state.  */
      old_chain_2 = make_cleanup (finish_thread_state_cleanup,
				  &minus_one_ptid);

      /* Now figure out what to do with the result of the result.  */
      handle_inferior_event (ecs);

      /* No error, don't finish the state yet.  */
      discard_cleanups (old_chain_2);

      /* Breakpoints and watchpoints are not installed on the target
	 at this point, and signals are passed directly to the
	 inferior, so this must mean the process is gone.  */
      if (!ecs->wait_some_more)
	{
	  discard_cleanups (old_chain_1);
	  error (_("Program exited while detaching"));
	}
    }

  discard_cleanups (old_chain_1);
}
3021
/* Wait for control to return from inferior to debugger.

   If inferior gets a signal, we may decide to start it up again
   instead of returning.  That is why there is a loop in this function.
   When this function actually returns it means the inferior
   should be left stopped and GDB should read more commands.  */

void
wait_for_inferior (void)
{
  struct cleanup *old_cleanups;

  if (debug_infrun)
    fprintf_unfiltered
      (gdb_stdlog, "infrun: wait_for_inferior ()\n");

  /* On any exit path, clear the per-thread infrun breakpoints of the
     threads that just stopped.  */
  old_cleanups
    = make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup,
		    NULL);

  while (1)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;
      struct cleanup *old_chain;
      /* Wait for an event from any thread of any inferior.  */
      ptid_t waiton_ptid = minus_one_ptid;

      memset (ecs, 0, sizeof (*ecs));

      overlay_cache_invalid = 1;

      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      if (deprecated_target_wait_hook)
	ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
      else
	ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);

      if (debug_infrun)
	print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);

      /* If an error happens while handling the event, propagate GDB's
	 knowledge of the executing state to the frontend/user running
	 state.  */
      old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);

      /* Now figure out what to do with the result of the result.  */
      handle_inferior_event (ecs);

      /* No error, don't finish the state yet.  */
      discard_cleanups (old_chain);

      /* Keep looping until the event handler decides the inferior is
	 really stopped.  */
      if (!ecs->wait_some_more)
	break;
    }

  do_cleanups (old_cleanups);
}
3084
/* Asynchronous version of wait_for_inferior.  It is called by the
   event loop whenever a change of state is detected on the file
   descriptor corresponding to the target.  It can be called more than
   once to complete a single execution command.  In such cases we need
   to keep the state in a global variable ECSS.  If it is the last time
   that this function is called for a single execution command, then
   report to the user that the inferior has stopped, and do the
   necessary cleanups.  */

void
fetch_inferior_event (void *client_data)
{
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
  struct cleanup *ts_old_chain;
  /* Remember whether we started in sync execution mode, to decide
     below whether the prompt needs restoring.  */
  int was_sync = sync_execution;
  /* Set when an execution command has fully completed.  */
  int cmd_done = 0;
  ptid_t waiton_ptid = minus_one_ptid;

  memset (ecs, 0, sizeof (*ecs));

  /* We're handling a live event, so make sure we're doing live
     debugging.  If we're looking at traceframes while the target is
     running, we're going to need to get back to that mode after
     handling the event.  */
  if (non_stop)
    {
      make_cleanup_restore_current_traceframe ();
      set_current_traceframe (-1);
    }

  if (non_stop)
    /* In non-stop mode, the user/frontend should not notice a thread
       switch due to internal events.  Make sure we reverse to the
       user selected thread and frame after handling the event and
       running any breakpoint commands.  */
    make_cleanup_restore_current_thread ();

  overlay_cache_invalid = 1;
  /* Flush target cache before starting to handle each event.  Target
     was running and cache could be stale.  This is just a heuristic.
     Running threads may modify target memory, but we don't get any
     event.  */
  target_dcache_invalidate ();

  make_cleanup_restore_integer (&execution_direction);
  execution_direction = target_execution_direction ();

  /* Poll for an event; TARGET_WNOHANG because the event loop already
     told us one is pending.  */
  if (deprecated_target_wait_hook)
    ecs->ptid =
      deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
  else
    ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);

  if (debug_infrun)
    print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);

  /* If an error happens while handling the event, propagate GDB's
     knowledge of the executing state to the frontend/user running
     state.  In non-stop only the event thread's state is touched;
     other threads keep running undisturbed.  */
  if (!non_stop)
    ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
  else
    ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);

  /* Get executed before make_cleanup_restore_current_thread above to apply
     still for the thread which has thrown the exception.  */
  make_bpstat_clear_actions_cleanup ();

  /* Now figure out what to do with the result of the result.  */
  handle_inferior_event (ecs);

  if (!ecs->wait_some_more)
    {
      struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));

      delete_just_stopped_threads_infrun_breakpoints ();

      /* We may not find an inferior if this was a process exit.  */
      if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
	normal_stop ();

      /* If we're in the middle of a multi-step command (step_multi)
	 and the step just finished one iteration, continue rather
	 than completing the command.  */
      if (target_has_execution
	  && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
	  && ecs->ws.kind != TARGET_WAITKIND_EXITED
	  && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
	  && ecs->event_thread->step_multi
	  && ecs->event_thread->control.stop_step)
	inferior_event_handler (INF_EXEC_CONTINUE, NULL);
      else
	{
	  inferior_event_handler (INF_EXEC_COMPLETE, NULL);
	  cmd_done = 1;
	}
    }

  /* No error, don't finish the thread states yet.  */
  discard_cleanups (ts_old_chain);

  /* Revert thread and frame.  */
  do_cleanups (old_chain);

  /* If the inferior was in sync execution mode, and now isn't,
     restore the prompt (a synchronous execution command has finished,
     and we're ready for input).  */
  if (interpreter_async && was_sync && !sync_execution)
    observer_notify_sync_execution_done ();

  if (cmd_done
      && !was_sync
      && exec_done_display_p
      && (ptid_equal (inferior_ptid, null_ptid)
	  || !is_running (inferior_ptid)))
    printf_unfiltered (_("completed.\n"));
}
3201
3202 /* Record the frame and location we're currently stepping through. */
3203 void
3204 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3205 {
3206 struct thread_info *tp = inferior_thread ();
3207
3208 tp->control.step_frame_id = get_frame_id (frame);
3209 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
3210
3211 tp->current_symtab = sal.symtab;
3212 tp->current_line = sal.line;
3213 }
3214
/* Clear context switchable stepping state.  */

void
init_thread_stepping_state (struct thread_info *tss)
{
  /* Reset the per-thread flags that drive stepping over breakpoints
     and watchpoints; a fresh resume starts with a clean slate.  */
  tss->stepping_over_breakpoint = 0;
  tss->stepping_over_watchpoint = 0;
  tss->step_after_step_resume_breakpoint = 0;
}
3224
/* Set the cached copy of the last ptid/waitstatus.  These globals
   back get_last_target_status below.  */

static void
set_last_target_status (ptid_t ptid, struct target_waitstatus status)
{
  target_last_wait_ptid = ptid;
  target_last_waitstatus = status;
}
3233
/* Return the cached copy of the last pid/waitstatus returned by
   target_wait()/deprecated_target_wait_hook().  The data is actually
   cached by handle_inferior_event(), which gets called immediately
   after target_wait()/deprecated_target_wait_hook().  Both PTIDP and
   STATUS must be non-NULL; they are written unconditionally.  */

void
get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
{
  *ptidp = target_last_wait_ptid;
  *status = target_last_waitstatus;
}
3245
/* Forget the cached last-event ptid (e.g., because the thread it
   referred to has exited), resetting it to minus_one_ptid.  */

void
nullify_last_target_wait_ptid (void)
{
  target_last_wait_ptid = minus_one_ptid;
}
3251
/* Switch thread contexts.  */

static void
context_switch (ptid_t ptid)
{
  if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
    {
      /* NOTE(review): two separate fprintf calls, presumably because
	 target_pid_to_str returns a static buffer that a single call
	 with both conversions would clobber — confirm before merging
	 these.  */
      fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
			  target_pid_to_str (inferior_ptid));
      fprintf_unfiltered (gdb_stdlog, "to %s\n",
			  target_pid_to_str (ptid));
    }

  switch_to_thread (ptid);
}
3267
/* If the last stop was a SIGTRAP caused by a software breakpoint,
   back the PC up to the breakpoint address on architectures where
   hitting a breakpoint leaves the PC past it
   (target_decr_pc_after_break != 0).  No-op otherwise.  */

static void
adjust_pc_after_break (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  struct address_space *aspace;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP.  If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break.  Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards.  It may not match
     gdbarch_decr_pc_after_break.  I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break.  I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here.  */

  if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
    return;

  if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed.  The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong.  E.g., consider this case on a decr_pc_after_break == 1
     architecture:

       B1         0x08000000 :   INSN1
       B2         0x08000001 :   INSN2
		  0x08000002 :   INSN3
       PC ->      0x08000003 :   INSN4

     Say you're stopped at 0x08000003 as above.  Reverse continuing
     from that point should hit B2 as below.  Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

       B1         0x08000000 :   INSN1
       B2   PC -> 0x08000001 :   INSN2
		  0x08000002 :   INSN3
		  0x08000003 :   INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1.  We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet.  Doing nothing is the correct
     behaviour.  */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do.  */
  regcache = get_thread_regcache (ecs->ptid);
  gdbarch = get_regcache_arch (regcache);

  decr_pc = target_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  aspace = get_regcache_aspace (regcache);

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be.  */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later.  To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported.  */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);

      /* Don't let the PC write below be recorded by full record mode;
	 it is a debugger-side fixup, not inferior execution.  */
      if (record_full_is_used ())
	record_full_gdb_operation_disable_set ();

      /* When using hardware single-step, a SIGTRAP is reported for both
	 a completed single-step and a software breakpoint.  Need to
	 differentiate between the two, as the latter needs adjusting
	 but the former does not.

	 The SIGTRAP can be due to a completed hardware single-step only if 
	  - we didn't insert software single-step breakpoints
	  - the thread to be examined is still the current thread
	  - this thread is currently being stepped

	 If any of these events did not occur, we must have stopped due
	 to hitting a software breakpoint, and have to back up to the
	 breakpoint address.

	 As a special case, we could have hardware single-stepped a
	 software breakpoint.  In this case (prev_pc == breakpoint_pc),
	 we also need to back up to the breakpoint address.  */

      if (singlestep_breakpoints_inserted_p
	  || !ptid_equal (ecs->ptid, inferior_ptid)
	  || !currently_stepping (ecs->event_thread)
	  || ecs->event_thread->prev_pc == breakpoint_pc)
	regcache_write_pc (regcache, breakpoint_pc);

      do_cleanups (old_cleanups);
    }
}
3390
3391 static int
3392 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3393 {
3394 for (frame = get_prev_frame (frame);
3395 frame != NULL;
3396 frame = get_prev_frame (frame))
3397 {
3398 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3399 return 1;
3400 if (get_frame_type (frame) != INLINE_FRAME)
3401 break;
3402 }
3403
3404 return 0;
3405 }
3406
/* Auxiliary function that handles syscall entry/return events.
   It returns 1 if the inferior should keep going (and GDB
   should ignore the event), or 0 if the event deserves to be
   processed.  */

static int
handle_syscall_event (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  int syscall_number;

  /* Make the event thread current before touching its regcache.  */
  if (!ptid_equal (ecs->ptid, inferior_ptid))
    context_switch (ecs->ptid);

  regcache = get_thread_regcache (ecs->ptid);
  syscall_number = ecs->ws.value.syscall_number;
  stop_pc = regcache_read_pc (regcache);

  /* Only consult the breakpoint layer if some syscall catchpoint
     exists and it covers this particular syscall number.  */
  if (catch_syscall_enabled () > 0
      && catching_syscall_number (syscall_number) > 0)
    {
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
                            syscall_number);

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (get_regcache_aspace (regcache),
			      stop_pc, ecs->ptid, &ecs->ws);

      if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  /* Catchpoint hit.  Let the caller process the stop.  */
	  return 0;
	}
    }

  /* If no catchpoint triggered for this, then keep going.  */
  keep_going (ecs);
  return 1;
}
3447
3448 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3449
3450 static void
3451 fill_in_stop_func (struct gdbarch *gdbarch,
3452 struct execution_control_state *ecs)
3453 {
3454 if (!ecs->stop_func_filled_in)
3455 {
3456 /* Don't care about return value; stop_func_start and stop_func_name
3457 will both be 0 if it doesn't work. */
3458 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3459 &ecs->stop_func_start, &ecs->stop_func_end);
3460 ecs->stop_func_start
3461 += gdbarch_deprecated_function_start_offset (gdbarch);
3462
3463 if (gdbarch_skip_entrypoint_p (gdbarch))
3464 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3465 ecs->stop_func_start);
3466
3467 ecs->stop_func_filled_in = 1;
3468 }
3469 }
3470
3471
/* Return the STOP_SOON field of the inferior pointed at by PTID.
   Callers must pass a ptid that maps to a live inferior; a missing
   inferior is an internal error.  */

static enum stop_kind
get_inferior_stop_soon (ptid_t ptid)
{
  struct inferior *inf = find_inferior_pid (ptid_get_pid (ptid));

  gdb_assert (inf != NULL);
  return inf->control.stop_soon;
}
3482
3483 /* Given an execution control state that has been freshly filled in by
3484 an event from the inferior, figure out what it means and take
3485 appropriate action.
3486
3487 The alternatives are:
3488
3489 1) stop_waiting and return; to really stop and return to the
3490 debugger.
3491
3492 2) keep_going and return; to wait for the next event (set
3493 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3494 once). */
3495
3496 static void
3497 handle_inferior_event (struct execution_control_state *ecs)
3498 {
3499 enum stop_kind stop_soon;
3500
3501 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3502 {
3503 /* We had an event in the inferior, but we are not interested in
3504 handling it at this level. The lower layers have already
3505 done what needs to be done, if anything.
3506
3507 One of the possible circumstances for this is when the
3508 inferior produces output for the console. The inferior has
3509 not stopped, and we are ignoring the event. Another possible
3510 circumstance is any event which the lower level knows will be
3511 reported multiple times without an intervening resume. */
3512 if (debug_infrun)
3513 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3514 prepare_to_wait (ecs);
3515 return;
3516 }
3517
3518 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3519 && target_can_async_p () && !sync_execution)
3520 {
3521 /* There were no unwaited-for children left in the target, but,
3522 we're not synchronously waiting for events either. Just
3523 ignore. Otherwise, if we were running a synchronous
3524 execution command, we need to cancel it and give the user
3525 back the terminal. */
3526 if (debug_infrun)
3527 fprintf_unfiltered (gdb_stdlog,
3528 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3529 prepare_to_wait (ecs);
3530 return;
3531 }
3532
3533 /* Cache the last pid/waitstatus. */
3534 set_last_target_status (ecs->ptid, ecs->ws);
3535
3536 /* Always clear state belonging to the previous time we stopped. */
3537 stop_stack_dummy = STOP_NONE;
3538
3539 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3540 {
3541 /* No unwaited-for children left. IOW, all resumed children
3542 have exited. */
3543 if (debug_infrun)
3544 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3545
3546 stop_print_frame = 0;
3547 stop_waiting (ecs);
3548 return;
3549 }
3550
3551 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3552 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3553 {
3554 ecs->event_thread = find_thread_ptid (ecs->ptid);
3555 /* If it's a new thread, add it to the thread database. */
3556 if (ecs->event_thread == NULL)
3557 ecs->event_thread = add_thread (ecs->ptid);
3558
3559 /* Disable range stepping. If the next step request could use a
3560 range, this will be end up re-enabled then. */
3561 ecs->event_thread->control.may_range_step = 0;
3562 }
3563
3564 /* Dependent on valid ECS->EVENT_THREAD. */
3565 adjust_pc_after_break (ecs);
3566
3567 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3568 reinit_frame_cache ();
3569
3570 breakpoint_retire_moribund ();
3571
3572 /* First, distinguish signals caused by the debugger from signals
3573 that have to do with the program's own actions. Note that
3574 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3575 on the operating system version. Here we detect when a SIGILL or
3576 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3577 something similar for SIGSEGV, since a SIGSEGV will be generated
3578 when we're trying to execute a breakpoint instruction on a
3579 non-executable stack. This happens for call dummy breakpoints
3580 for architectures like SPARC that place call dummies on the
3581 stack. */
3582 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3583 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3584 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3585 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3586 {
3587 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3588
3589 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3590 regcache_read_pc (regcache)))
3591 {
3592 if (debug_infrun)
3593 fprintf_unfiltered (gdb_stdlog,
3594 "infrun: Treating signal as SIGTRAP\n");
3595 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3596 }
3597 }
3598
3599 /* Mark the non-executing threads accordingly. In all-stop, all
3600 threads of all processes are stopped when we get any event
3601 reported. In non-stop mode, only the event thread stops. If
3602 we're handling a process exit in non-stop mode, there's nothing
3603 to do, as threads of the dead process are gone, and threads of
3604 any other process were left running. */
3605 if (!non_stop)
3606 set_executing (minus_one_ptid, 0);
3607 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3608 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3609 set_executing (ecs->ptid, 0);
3610
3611 switch (ecs->ws.kind)
3612 {
3613 case TARGET_WAITKIND_LOADED:
3614 if (debug_infrun)
3615 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3616 if (!ptid_equal (ecs->ptid, inferior_ptid))
3617 context_switch (ecs->ptid);
3618 /* Ignore gracefully during startup of the inferior, as it might
3619 be the shell which has just loaded some objects, otherwise
3620 add the symbols for the newly loaded objects. Also ignore at
3621 the beginning of an attach or remote session; we will query
3622 the full list of libraries once the connection is
3623 established. */
3624
3625 stop_soon = get_inferior_stop_soon (ecs->ptid);
3626 if (stop_soon == NO_STOP_QUIETLY)
3627 {
3628 struct regcache *regcache;
3629
3630 regcache = get_thread_regcache (ecs->ptid);
3631
3632 handle_solib_event ();
3633
3634 ecs->event_thread->control.stop_bpstat
3635 = bpstat_stop_status (get_regcache_aspace (regcache),
3636 stop_pc, ecs->ptid, &ecs->ws);
3637
3638 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3639 {
3640 /* A catchpoint triggered. */
3641 process_event_stop_test (ecs);
3642 return;
3643 }
3644
3645 /* If requested, stop when the dynamic linker notifies
3646 gdb of events. This allows the user to get control
3647 and place breakpoints in initializer routines for
3648 dynamically loaded objects (among other things). */
3649 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3650 if (stop_on_solib_events)
3651 {
3652 /* Make sure we print "Stopped due to solib-event" in
3653 normal_stop. */
3654 stop_print_frame = 1;
3655
3656 stop_waiting (ecs);
3657 return;
3658 }
3659 }
3660
3661 /* If we are skipping through a shell, or through shared library
3662 loading that we aren't interested in, resume the program. If
3663 we're running the program normally, also resume. */
3664 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3665 {
3666 /* Loading of shared libraries might have changed breakpoint
3667 addresses. Make sure new breakpoints are inserted. */
3668 if (stop_soon == NO_STOP_QUIETLY)
3669 insert_breakpoints ();
3670 resume (0, GDB_SIGNAL_0);
3671 prepare_to_wait (ecs);
3672 return;
3673 }
3674
3675 /* But stop if we're attaching or setting up a remote
3676 connection. */
3677 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3678 || stop_soon == STOP_QUIETLY_REMOTE)
3679 {
3680 if (debug_infrun)
3681 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3682 stop_waiting (ecs);
3683 return;
3684 }
3685
3686 internal_error (__FILE__, __LINE__,
3687 _("unhandled stop_soon: %d"), (int) stop_soon);
3688
3689 case TARGET_WAITKIND_SPURIOUS:
3690 if (debug_infrun)
3691 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3692 if (!ptid_equal (ecs->ptid, inferior_ptid))
3693 context_switch (ecs->ptid);
3694 resume (0, GDB_SIGNAL_0);
3695 prepare_to_wait (ecs);
3696 return;
3697
3698 case TARGET_WAITKIND_EXITED:
3699 case TARGET_WAITKIND_SIGNALLED:
3700 if (debug_infrun)
3701 {
3702 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3703 fprintf_unfiltered (gdb_stdlog,
3704 "infrun: TARGET_WAITKIND_EXITED\n");
3705 else
3706 fprintf_unfiltered (gdb_stdlog,
3707 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3708 }
3709
3710 inferior_ptid = ecs->ptid;
3711 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3712 set_current_program_space (current_inferior ()->pspace);
3713 handle_vfork_child_exec_or_exit (0);
3714 target_terminal_ours (); /* Must do this before mourn anyway. */
3715
3716 /* Clearing any previous state of convenience variables. */
3717 clear_exit_convenience_vars ();
3718
3719 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3720 {
3721 /* Record the exit code in the convenience variable $_exitcode, so
3722 that the user can inspect this again later. */
3723 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3724 (LONGEST) ecs->ws.value.integer);
3725
3726 /* Also record this in the inferior itself. */
3727 current_inferior ()->has_exit_code = 1;
3728 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3729
3730 /* Support the --return-child-result option. */
3731 return_child_result_value = ecs->ws.value.integer;
3732
3733 observer_notify_exited (ecs->ws.value.integer);
3734 }
3735 else
3736 {
3737 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3738 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3739
3740 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3741 {
3742 /* Set the value of the internal variable $_exitsignal,
3743 which holds the signal uncaught by the inferior. */
3744 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3745 gdbarch_gdb_signal_to_target (gdbarch,
3746 ecs->ws.value.sig));
3747 }
3748 else
3749 {
3750 /* We don't have access to the target's method used for
3751 converting between signal numbers (GDB's internal
3752 representation <-> target's representation).
3753 Therefore, we cannot do a good job at displaying this
3754 information to the user. It's better to just warn
3755 her about it (if infrun debugging is enabled), and
3756 give up. */
3757 if (debug_infrun)
3758 fprintf_filtered (gdb_stdlog, _("\
3759 Cannot fill $_exitsignal with the correct signal number.\n"));
3760 }
3761
3762 observer_notify_signal_exited (ecs->ws.value.sig);
3763 }
3764
3765 gdb_flush (gdb_stdout);
3766 target_mourn_inferior ();
3767 singlestep_breakpoints_inserted_p = 0;
3768 cancel_single_step_breakpoints ();
3769 stop_print_frame = 0;
3770 stop_waiting (ecs);
3771 return;
3772
3773 /* The following are the only cases in which we keep going;
3774 the above cases end in a continue or goto. */
3775 case TARGET_WAITKIND_FORKED:
3776 case TARGET_WAITKIND_VFORKED:
3777 if (debug_infrun)
3778 {
3779 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3780 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3781 else
3782 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3783 }
3784
3785 /* Check whether the inferior is displaced stepping. */
3786 {
3787 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3788 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3789 struct displaced_step_inferior_state *displaced
3790 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3791
3792 /* If checking displaced stepping is supported, and thread
3793 ecs->ptid is displaced stepping. */
3794 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3795 {
3796 struct inferior *parent_inf
3797 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3798 struct regcache *child_regcache;
3799 CORE_ADDR parent_pc;
3800
3801 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3802 indicating that the displaced stepping of syscall instruction
3803 has been done. Perform cleanup for parent process here. Note
3804 that this operation also cleans up the child process for vfork,
3805 because their pages are shared. */
3806 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3807
3808 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3809 {
3810 /* Restore scratch pad for child process. */
3811 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3812 }
3813
3814 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3815 the child's PC is also within the scratchpad. Set the child's PC
3816 to the parent's PC value, which has already been fixed up.
3817 FIXME: we use the parent's aspace here, although we're touching
3818 the child, because the child hasn't been added to the inferior
3819 list yet at this point. */
3820
3821 child_regcache
3822 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3823 gdbarch,
3824 parent_inf->aspace);
3825 /* Read PC value of parent process. */
3826 parent_pc = regcache_read_pc (regcache);
3827
3828 if (debug_displaced)
3829 fprintf_unfiltered (gdb_stdlog,
3830 "displaced: write child pc from %s to %s\n",
3831 paddress (gdbarch,
3832 regcache_read_pc (child_regcache)),
3833 paddress (gdbarch, parent_pc));
3834
3835 regcache_write_pc (child_regcache, parent_pc);
3836 }
3837 }
3838
3839 if (!ptid_equal (ecs->ptid, inferior_ptid))
3840 context_switch (ecs->ptid);
3841
3842 /* Immediately detach breakpoints from the child before there's
3843 any chance of letting the user delete breakpoints from the
3844 breakpoint lists. If we don't do this early, it's easy to
3845 leave left over traps in the child, vis: "break foo; catch
3846 fork; c; <fork>; del; c; <child calls foo>". We only follow
3847 the fork on the last `continue', and by that time the
3848 breakpoint at "foo" is long gone from the breakpoint table.
3849 If we vforked, then we don't need to unpatch here, since both
3850 parent and child are sharing the same memory pages; we'll
3851 need to unpatch at follow/detach time instead to be certain
3852 that new breakpoints added between catchpoint hit time and
3853 vfork follow are detached. */
3854 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3855 {
3856 /* This won't actually modify the breakpoint list, but will
3857 physically remove the breakpoints from the child. */
3858 detach_breakpoints (ecs->ws.value.related_pid);
3859 }
3860
3861 if (singlestep_breakpoints_inserted_p)
3862 {
3863 /* Pull the single step breakpoints out of the target. */
3864 remove_single_step_breakpoints ();
3865 singlestep_breakpoints_inserted_p = 0;
3866 }
3867
3868 /* In case the event is caught by a catchpoint, remember that
3869 the event is to be followed at the next resume of the thread,
3870 and not immediately. */
3871 ecs->event_thread->pending_follow = ecs->ws;
3872
3873 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3874
3875 ecs->event_thread->control.stop_bpstat
3876 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3877 stop_pc, ecs->ptid, &ecs->ws);
3878
3879 /* If no catchpoint triggered for this, then keep going. Note
3880 that we're interested in knowing the bpstat actually causes a
3881 stop, not just if it may explain the signal. Software
3882 watchpoints, for example, always appear in the bpstat. */
3883 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3884 {
3885 ptid_t parent;
3886 ptid_t child;
3887 int should_resume;
3888 int follow_child
3889 = (follow_fork_mode_string == follow_fork_mode_child);
3890
3891 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3892
3893 should_resume = follow_fork ();
3894
3895 parent = ecs->ptid;
3896 child = ecs->ws.value.related_pid;
3897
3898 /* In non-stop mode, also resume the other branch. */
3899 if (non_stop && !detach_fork)
3900 {
3901 if (follow_child)
3902 switch_to_thread (parent);
3903 else
3904 switch_to_thread (child);
3905
3906 ecs->event_thread = inferior_thread ();
3907 ecs->ptid = inferior_ptid;
3908 keep_going (ecs);
3909 }
3910
3911 if (follow_child)
3912 switch_to_thread (child);
3913 else
3914 switch_to_thread (parent);
3915
3916 ecs->event_thread = inferior_thread ();
3917 ecs->ptid = inferior_ptid;
3918
3919 if (should_resume)
3920 keep_going (ecs);
3921 else
3922 stop_waiting (ecs);
3923 return;
3924 }
3925 process_event_stop_test (ecs);
3926 return;
3927
3928 case TARGET_WAITKIND_VFORK_DONE:
3929 /* Done with the shared memory region. Re-insert breakpoints in
3930 the parent, and keep going. */
3931
3932 if (debug_infrun)
3933 fprintf_unfiltered (gdb_stdlog,
3934 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3935
3936 if (!ptid_equal (ecs->ptid, inferior_ptid))
3937 context_switch (ecs->ptid);
3938
3939 current_inferior ()->waiting_for_vfork_done = 0;
3940 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3941 /* This also takes care of reinserting breakpoints in the
3942 previously locked inferior. */
3943 keep_going (ecs);
3944 return;
3945
3946 case TARGET_WAITKIND_EXECD:
3947 if (debug_infrun)
3948 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3949
3950 if (!ptid_equal (ecs->ptid, inferior_ptid))
3951 context_switch (ecs->ptid);
3952
3953 singlestep_breakpoints_inserted_p = 0;
3954 cancel_single_step_breakpoints ();
3955
3956 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3957
3958 /* Do whatever is necessary to the parent branch of the vfork. */
3959 handle_vfork_child_exec_or_exit (1);
3960
3961 /* This causes the eventpoints and symbol table to be reset.
3962 Must do this now, before trying to determine whether to
3963 stop. */
3964 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3965
3966 ecs->event_thread->control.stop_bpstat
3967 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3968 stop_pc, ecs->ptid, &ecs->ws);
3969
3970 /* Note that this may be referenced from inside
3971 bpstat_stop_status above, through inferior_has_execd. */
3972 xfree (ecs->ws.value.execd_pathname);
3973 ecs->ws.value.execd_pathname = NULL;
3974
3975 /* If no catchpoint triggered for this, then keep going. */
3976 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3977 {
3978 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3979 keep_going (ecs);
3980 return;
3981 }
3982 process_event_stop_test (ecs);
3983 return;
3984
3985 /* Be careful not to try to gather much state about a thread
3986 that's in a syscall. It's frequently a losing proposition. */
3987 case TARGET_WAITKIND_SYSCALL_ENTRY:
3988 if (debug_infrun)
3989 fprintf_unfiltered (gdb_stdlog,
3990 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3991 /* Getting the current syscall number. */
3992 if (handle_syscall_event (ecs) == 0)
3993 process_event_stop_test (ecs);
3994 return;
3995
3996 /* Before examining the threads further, step this thread to
3997 get it entirely out of the syscall. (We get notice of the
3998 event when the thread is just on the verge of exiting a
3999 syscall. Stepping one instruction seems to get it back
4000 into user code.) */
4001 case TARGET_WAITKIND_SYSCALL_RETURN:
4002 if (debug_infrun)
4003 fprintf_unfiltered (gdb_stdlog,
4004 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
4005 if (handle_syscall_event (ecs) == 0)
4006 process_event_stop_test (ecs);
4007 return;
4008
4009 case TARGET_WAITKIND_STOPPED:
4010 if (debug_infrun)
4011 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
4012 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
4013 handle_signal_stop (ecs);
4014 return;
4015
4016 case TARGET_WAITKIND_NO_HISTORY:
4017 if (debug_infrun)
4018 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
4019 /* Reverse execution: target ran out of history info. */
4020
4021 /* Pull the single step breakpoints out of the target. */
4022 if (singlestep_breakpoints_inserted_p)
4023 {
4024 if (!ptid_equal (ecs->ptid, inferior_ptid))
4025 context_switch (ecs->ptid);
4026 remove_single_step_breakpoints ();
4027 singlestep_breakpoints_inserted_p = 0;
4028 }
4029 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4030 observer_notify_no_history ();
4031 stop_waiting (ecs);
4032 return;
4033 }
4034 }
4035
/* Come here when the program has stopped with a signal
   (TARGET_WAITKIND_STOPPED).  Decide whether the stop is "random"
   (a signal not explained by breakpoints or stepping) and either
   stop and report to the user, set up further stepping, or resume
   the inferior transparently.  Updates the globals stop_pc,
   stop_print_frame and stopped_by_random_signal as side effects.  */

static void
handle_signal_stop (struct execution_control_state *ecs)
{
  struct frame_info *frame;
  struct gdbarch *gdbarch;
  int stopped_by_watchpoint;
  enum stop_kind stop_soon;
  int random_signal;

  gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);

  /* Do we need to clean up the state of a thread that has
     completed a displaced single-step?  (Doing so usually affects
     the PC, so do it here, before we set stop_pc.)  */
  displaced_step_fixup (ecs->ptid,
			ecs->event_thread->suspend.stop_signal);

  /* If we either finished a single-step or hit a breakpoint, but
     the user wanted this thread to be stopped, pretend we got a
     SIG0 (generic unsignaled stop).  */
  if (ecs->event_thread->stop_requested
      && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
    ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

  stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));

  /* Dump the stop PC and, if applicable, the watchpoint data address
     to the debug log.  Temporarily switch inferior_ptid to the event
     thread so the target queries below apply to the right thread.  */
  if (debug_infrun)
    {
      struct regcache *regcache = get_thread_regcache (ecs->ptid);
      struct gdbarch *gdbarch = get_regcache_arch (regcache);
      struct cleanup *old_chain = save_inferior_ptid ();

      inferior_ptid = ecs->ptid;

      fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
			  paddress (gdbarch, stop_pc));
      if (target_stopped_by_watchpoint ())
	{
	  CORE_ADDR addr;

	  fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");

	  if (target_stopped_data_address (&current_target, &addr))
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: stopped data address = %s\n",
				paddress (gdbarch, addr));
	  else
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: (no data address available)\n");
	}

      do_cleanups (old_chain);
    }

  /* This is originated from start_remote(), start_inferior() and
     shared libraries hook functions.  */
  stop_soon = get_inferior_stop_soon (ecs->ptid);
  if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
    {
      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
      stop_print_frame = 1;
      stop_waiting (ecs);
      return;
    }

  /* "set stop-after-trap on": stop at the very first trap, without
     printing a frame.  */
  if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
      && stop_after_trap)
    {
      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
      stop_print_frame = 0;
      stop_waiting (ecs);
      return;
    }

  /* This originates from attach_command().  We need to overwrite
     the stop_signal here, because some kernels don't ignore a
     SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
     See more comments in inferior.h.  On the other hand, if we
     get a non-SIGSTOP, report it to the user - assume the backend
     will handle the SIGSTOP if it should show up later.

     Also consider that the attach is complete when we see a
     SIGTRAP.  Some systems (e.g. Windows), and stubs supporting
     target extended-remote report it instead of a SIGSTOP
     (e.g. gdbserver).  We already rely on SIGTRAP being our
     signal, so this is no exception.

     Also consider that the attach is complete when we see a
     GDB_SIGNAL_0.  In non-stop mode, GDB will explicitly tell
     the target to stop all threads of the inferior, in case the
     low level attach operation doesn't stop them implicitly.  If
     they weren't stopped implicitly, then the stub will report a
     GDB_SIGNAL_0, meaning: stopped for no particular reason
     other than GDB's request.  */
  if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
      && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
	  || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
	  || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
    {
      stop_print_frame = 1;
      stop_waiting (ecs);
      ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
      return;
    }

  /* See if something interesting happened to the non-current thread.  If
     so, then switch to that thread.  */
  if (!ptid_equal (ecs->ptid, inferior_ptid))
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");

      context_switch (ecs->ptid);

      if (deprecated_context_hook)
	deprecated_context_hook (pid_to_thread_id (ecs->ptid));
    }

  /* At this point, get hold of the now-current thread's frame.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  /* Pull the single step breakpoints out of the target.  */
  if (singlestep_breakpoints_inserted_p)
    {
      /* However, before doing so, if this single-step breakpoint was
	 actually for another thread, set this thread up for moving
	 past it.  */
      if (!ptid_equal (ecs->ptid, singlestep_ptid)
	  && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
	{
	  struct regcache *regcache;
	  struct address_space *aspace;
	  CORE_ADDR pc;

	  regcache = get_thread_regcache (ecs->ptid);
	  aspace = get_regcache_aspace (regcache);
	  pc = regcache_read_pc (regcache);
	  if (single_step_breakpoint_inserted_here_p (aspace, pc))
	    {
	      if (debug_infrun)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "infrun: [%s] hit step over single-step"
				      " breakpoint of [%s]\n",
				      target_pid_to_str (ecs->ptid),
				      target_pid_to_str (singlestep_ptid));
		}
	      /* Remember this so the trap isn't later classified as a
		 random signal, and so we know to step this thread past
		 the (now removed) breakpoint location.  */
	      ecs->hit_singlestep_breakpoint = 1;
	    }
	}

      remove_single_step_breakpoints ();
      singlestep_breakpoints_inserted_p = 0;
    }

  /* If this SIGTRAP is the expected result of stepping this thread
     over a watchpoint, don't re-query the target; treat it as "not
     stopped by a watchpoint" so we don't step over it again.  */
  if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
      && ecs->event_thread->control.trap_expected
      && ecs->event_thread->stepping_over_watchpoint)
    stopped_by_watchpoint = 0;
  else
    stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);

  /* If necessary, step over this watchpoint.  We'll be back to display
     it in a moment.  */
  if (stopped_by_watchpoint
      && (target_have_steppable_watchpoint
	  || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
    {
      /* At this point, we are stopped at an instruction which has
	 attempted to write to a piece of memory under control of
	 a watchpoint.  The instruction hasn't actually executed
	 yet.  If we were to evaluate the watchpoint expression
	 now, we would get the old value, and therefore no change
	 would seem to have occurred.

	 In order to make watchpoints work `right', we really need
	 to complete the memory write, and then evaluate the
	 watchpoint expression.  We do this by single-stepping the
	 target.

	 It may not be necessary to disable the watchpoint to step over
	 it.  For example, the PA can (with some kernel cooperation)
	 single step over a watchpoint without disabling the watchpoint.

	 It is far more common to need to disable a watchpoint to step
	 the inferior over it.  If we have non-steppable watchpoints,
	 we must disable the current watchpoint; it's simplest to
	 disable all watchpoints.

	 Any breakpoint at PC must also be stepped over -- if there's
	 one, it will have already triggered before the watchpoint
	 triggered, and we either already reported it to the user, or
	 it didn't cause a stop and we called keep_going.  In either
	 case, if there was a breakpoint at PC, we must be trying to
	 step past it.  */
      ecs->event_thread->stepping_over_watchpoint = 1;
      keep_going (ecs);
      return;
    }

  /* Reset per-stop state before deciding what this event means.  */
  ecs->event_thread->stepping_over_breakpoint = 0;
  ecs->event_thread->stepping_over_watchpoint = 0;
  bpstat_clear (&ecs->event_thread->control.stop_bpstat);
  ecs->event_thread->control.stop_step = 0;
  stop_print_frame = 1;
  stopped_by_random_signal = 0;

  /* Hide inlined functions starting here, unless we just performed stepi or
     nexti.  After stepi and nexti, always show the innermost frame (not any
     inline function call sites).  */
  if (ecs->event_thread->control.step_range_end != 1)
    {
      struct address_space *aspace =
	get_regcache_aspace (get_thread_regcache (ecs->ptid));

      /* skip_inline_frames is expensive, so we avoid it if we can
	 determine that the address is one where functions cannot have
	 been inlined.  This improves performance with inferiors that
	 load a lot of shared libraries, because the solib event
	 breakpoint is defined as the address of a function (i.e. not
	 inline).  Note that we have to check the previous PC as well
	 as the current one to catch cases when we have just
	 single-stepped off a breakpoint prior to reinstating it.
	 Note that we're assuming that the code we single-step to is
	 not inline, but that's not definitive: there's nothing
	 preventing the event breakpoint function from containing
	 inlined code, and the single-step ending up there.  If the
	 user had set a breakpoint on that inlined code, the missing
	 skip_inline_frames call would break things.  Fortunately
	 that's an extremely unlikely scenario.  */
      if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
	  && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
	       && ecs->event_thread->control.trap_expected
	       && pc_at_non_inline_function (aspace,
					     ecs->event_thread->prev_pc,
					     &ecs->ws)))
	{
	  skip_inline_frames (ecs->ptid);

	  /* Re-fetch current thread's frame in case that invalidated
	     the frame cache.  */
	  frame = get_current_frame ();
	  gdbarch = get_frame_arch (frame);
	}
    }

  if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
      && ecs->event_thread->control.trap_expected
      && gdbarch_single_step_through_delay_p (gdbarch)
      && currently_stepping (ecs->event_thread))
    {
      /* We're trying to step off a breakpoint.  Turns out that we're
	 also on an instruction that needs to be stepped multiple
	 times before it's been fully executing.  E.g., architectures
	 with a delay slot.  It needs to be stepped twice, once for
	 the instruction and once for the delay slot.  */
      int step_through_delay
	= gdbarch_single_step_through_delay (gdbarch, frame);

      if (debug_infrun && step_through_delay)
	fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
      if (ecs->event_thread->control.step_range_end == 0
	  && step_through_delay)
	{
	  /* The user issued a continue when stopped at a breakpoint.
	     Set up for another trap and get out of here.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	  keep_going (ecs);
	  return;
	}
      else if (step_through_delay)
	{
	  /* The user issued a step when stopped at a breakpoint.
	     Maybe we should stop, maybe we should not - the delay
	     slot *might* correspond to a line of source.  In any
	     case, don't decide that here, just set
	     ecs->stepping_over_breakpoint, making sure we
	     single-step again before breakpoints are re-inserted.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	}
    }

  /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
     handles this event.  */
  ecs->event_thread->control.stop_bpstat
    = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
			  stop_pc, ecs->ptid, &ecs->ws);

  /* Following in case break condition called a
     function.  */
  stop_print_frame = 1;

  /* This is where we handle "moribund" watchpoints.  Unlike
     software breakpoints traps, hardware watchpoint traps are
     always distinguishable from random traps.  If no high-level
     watchpoint is associated with the reported stop data address
     anymore, then the bpstat does not explain the signal ---
     simply make sure to ignore it if `stopped_by_watchpoint' is
     set.  */

  if (debug_infrun
      && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
      && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
				  GDB_SIGNAL_TRAP)
      && stopped_by_watchpoint)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: no user watchpoint explains "
			"watchpoint SIGTRAP, ignoring\n");

  /* NOTE: cagney/2003-03-29: These checks for a random signal
     at one stage in the past included checks for an inferior
     function call's call dummy's return breakpoint.  The original
     comment, that went with the test, read:

     ``End of a stack dummy.  Some systems (e.g. Sony news) give
     another signal besides SIGTRAP, so check here as well as
     above.''

     If someone ever tries to get call dummys on a
     non-executable stack to work (where the target would stop
     with something like a SIGSEGV), then those tests might need
     to be re-instated.  Given, however, that the tests were only
     enabled when momentary breakpoints were not being used, I
     suspect that it won't be the case.

     NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
     be necessary for call dummies on a non-executable stack on
     SPARC.  */

  /* See if the breakpoints module can explain the signal.  */
  random_signal
    = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
			       ecs->event_thread->suspend.stop_signal);

  /* If not, perhaps stepping/nexting can.  */
  if (random_signal)
    random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
		      && currently_stepping (ecs->event_thread));

  /* Perhaps the thread hit a single-step breakpoint of _another_
     thread.  Single-step breakpoints are transparent to the
     breakpoints module.  */
  if (random_signal)
    random_signal = !ecs->hit_singlestep_breakpoint;

  /* No?  Perhaps we got a moribund watchpoint.  */
  if (random_signal)
    random_signal = !stopped_by_watchpoint;

  /* For the program's own signals, act according to
     the signal handling tables.  */

  if (random_signal)
    {
      /* Signal not for debugging purposes.  */
      struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
      enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
			    gdb_signal_to_symbol_string (stop_signal));

      stopped_by_random_signal = 1;

      /* Always stop on signals if we're either just gaining control
	 of the program, or the user explicitly requested this thread
	 to remain stopped.  */
      if (stop_soon != NO_STOP_QUIETLY
	  || ecs->event_thread->stop_requested
	  || (!inf->detaching
	      && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
	{
	  stop_waiting (ecs);
	  return;
	}

      /* Notify observers the signal has "handle print" set.  Note we
	 returned early above if stopping; normal_stop handles the
	 printing in that case.  */
      if (signal_print[ecs->event_thread->suspend.stop_signal])
	{
	  /* The signal table tells us to print about this signal.  */
	  target_terminal_ours_for_output ();
	  observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
	  target_terminal_inferior ();
	}

      /* Clear the signal if it should not be passed.  */
      if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
	ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

      if (ecs->event_thread->prev_pc == stop_pc
	  && ecs->event_thread->control.trap_expected
	  && ecs->event_thread->control.step_resume_breakpoint == NULL)
	{
	  /* We were just starting a new sequence, attempting to
	     single-step off of a breakpoint and expecting a SIGTRAP.
	     Instead this signal arrives.  This signal will take us out
	     of the stepping range so GDB needs to remember to, when
	     the signal handler returns, resume stepping off that
	     breakpoint.  */
	  /* To simplify things, "continue" is forced to use the same
	     code paths as single-step - set a breakpoint at the
	     signal return address and then, once hit, step off that
	     breakpoint.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: signal arrived while stepping over "
				"breakpoint\n");

	  insert_hp_step_resume_breakpoint_at_frame (frame);
	  ecs->event_thread->step_after_step_resume_breakpoint = 1;
	  /* Reset trap_expected to ensure breakpoints are re-inserted.  */
	  ecs->event_thread->control.trap_expected = 0;

	  /* If we were nexting/stepping some other thread, switch to
	     it, so that we don't continue it, losing control.  */
	  if (!switch_back_to_stepped_thread (ecs))
	    keep_going (ecs);
	  return;
	}

      if (ecs->event_thread->control.step_range_end != 0
	  && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
	  && pc_in_thread_step_range (stop_pc, ecs->event_thread)
	  && frame_id_eq (get_stack_frame_id (frame),
			  ecs->event_thread->control.step_stack_frame_id)
	  && ecs->event_thread->control.step_resume_breakpoint == NULL)
	{
	  /* The inferior is about to take a signal that will take it
	     out of the single step range.  Set a breakpoint at the
	     current PC (which is presumably where the signal handler
	     will eventually return) and then allow the inferior to
	     run free.

	     Note that this is only needed for a signal delivered
	     while in the single-step range.  Nested signals aren't a
	     problem as they eventually all return.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: signal may take us out of "
				"single-step range\n");

	  insert_hp_step_resume_breakpoint_at_frame (frame);
	  /* Reset trap_expected to ensure breakpoints are re-inserted.  */
	  ecs->event_thread->control.trap_expected = 0;
	  keep_going (ecs);
	  return;
	}

      /* Note: step_resume_breakpoint may be non-NULL.  This occurs
	 when either there's a nested signal, or when there's a
	 pending signal enabled just as the signal handler returns
	 (leaving the inferior at the step-resume-breakpoint without
	 actually executing it).  Either way continue until the
	 breakpoint is really hit.  */

      if (!switch_back_to_stepped_thread (ecs))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: random signal, keep going\n");

	  keep_going (ecs);
	}
      return;
    }

  /* The signal is explained by a breakpoint or by stepping; decide
     whether it should actually cause a user-visible stop.  */
  process_event_stop_test (ecs);
}
4515
/* Come here when we've got some debug event / signal we can explain
   (IOW, not a random signal), and test whether it should cause a
   stop, or whether we should resume the inferior (transparently).
   E.g., could be a breakpoint whose condition evaluates false; we
   could be still stepping within the line; etc.  */

static void
process_event_stop_test (struct execution_control_state *ecs)
{
  struct symtab_and_line stop_pc_sal;
  struct frame_info *frame;
  struct gdbarch *gdbarch;
  CORE_ADDR jmp_buf_pc;
  struct bpstat_what what;

  /* Handle cases caused by hitting a breakpoint.  */

  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  /* Ask the breakpoint module what (single) action the set of
     breakpoints hit by this stop implies.  */
  what = bpstat_what (ecs->event_thread->control.stop_bpstat);

  if (what.call_dummy)
    {
      stop_stack_dummy = what.call_dummy;
    }

  /* If we hit an internal event that triggers symbol changes, the
     current frame will be invalidated within bpstat_what (e.g., if we
     hit an internal solib event).  Re-fetch it.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  switch (what.main_action)
    {
    case BPSTAT_WHAT_SET_LONGJMP_RESUME:
      /* If we hit the breakpoint at longjmp while stepping, we
	 install a momentary breakpoint at the target of the
	 jmp_buf.  */

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");

      ecs->event_thread->stepping_over_breakpoint = 1;

      if (what.is_longjmp)
	{
	  struct value *arg_value;

	  /* If we set the longjmp breakpoint via a SystemTap probe,
	     then use it to extract the arguments.  The destination PC
	     is the third argument to the probe.  */
	  arg_value = probe_safe_evaluate_at_pc (frame, 2);
	  if (arg_value)
	    {
	      jmp_buf_pc = value_as_address (arg_value);
	      jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
	    }
	  else if (!gdbarch_get_longjmp_target_p (gdbarch)
		   || !gdbarch_get_longjmp_target (gdbarch,
						   frame, &jmp_buf_pc))
	    {
	      /* No probe and no arch support for extracting the
		 longjmp target; we can't know where the longjmp will
		 land, so just keep going.  */
	      if (debug_infrun)
		fprintf_unfiltered (gdb_stdlog,
				    "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
				    "(!gdbarch_get_longjmp_target)\n");
	      keep_going (ecs);
	      return;
	    }

	  /* Insert a breakpoint at resume address.  */
	  insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
	}
      else
	check_exception_resume (ecs, frame);
      keep_going (ecs);
      return;

    case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
      {
	struct frame_info *init_frame;

	/* There are several cases to consider.

	   1. The initiating frame no longer exists.  In this case we
	   must stop, because the exception or longjmp has gone too
	   far.

	   2. The initiating frame exists, and is the same as the
	   current frame.  We stop, because the exception or longjmp
	   has been caught.

	   3. The initiating frame exists and is different from the
	   current frame.  This means the exception or longjmp has
	   been caught beneath the initiating frame, so keep going.

	   4. longjmp breakpoint has been placed just to protect
	   against stale dummy frames and user is not interested in
	   stopping around longjmps.  */

	if (debug_infrun)
	  fprintf_unfiltered (gdb_stdlog,
			      "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");

	gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
		    != NULL);
	delete_exception_resume_breakpoint (ecs->event_thread);

	if (what.is_longjmp)
	  {
	    check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);

	    if (!frame_id_p (ecs->event_thread->initiating_frame))
	      {
		/* Case 4.  */
		keep_going (ecs);
		return;
	      }
	  }

	init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);

	if (init_frame)
	  {
	    struct frame_id current_id
	      = get_frame_id (get_current_frame ());
	    if (frame_id_eq (current_id,
			     ecs->event_thread->initiating_frame))
	      {
		/* Case 2.  Fall through.  */
	      }
	    else
	      {
		/* Case 3.  */
		keep_going (ecs);
		return;
	      }
	  }

	/* For Cases 1 and 2, remove the step-resume breakpoint, if it
	   exists.  */
	delete_step_resume_breakpoint (ecs->event_thread);

	end_stepping_range (ecs);
      }
      return;

    case BPSTAT_WHAT_SINGLE:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
      ecs->event_thread->stepping_over_breakpoint = 1;
      /* Still need to check other stuff, at least the case where we
	 are stepping and step out of the right range.  */
      break;

    case BPSTAT_WHAT_STEP_RESUME:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");

      delete_step_resume_breakpoint (ecs->event_thread);
      if (ecs->event_thread->control.proceed_to_finish
	  && execution_direction == EXEC_REVERSE)
	{
	  struct thread_info *tp = ecs->event_thread;

	  /* We are finishing a function in reverse, and just hit the
	     step-resume breakpoint at the start address of the
	     function, and we're almost there -- just need to back up
	     by one more single-step, which should take us back to the
	     function call.  */
	  tp->control.step_range_start = tp->control.step_range_end = 1;
	  keep_going (ecs);
	  return;
	}
      fill_in_stop_func (gdbarch, ecs);
      if (stop_pc == ecs->stop_func_start
	  && execution_direction == EXEC_REVERSE)
	{
	  /* We are stepping over a function call in reverse, and just
	     hit the step-resume breakpoint at the start address of
	     the function.  Go back to single-stepping, which should
	     take us back to the function call.  */
	  ecs->event_thread->stepping_over_breakpoint = 1;
	  keep_going (ecs);
	  return;
	}
      break;

    case BPSTAT_WHAT_STOP_NOISY:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
      stop_print_frame = 1;

      /* Assume the thread stopped for a breakpoint.  We'll still check
	 whether a/the breakpoint is there when the thread is next
	 resumed.  */
      ecs->event_thread->stepping_over_breakpoint = 1;

      stop_waiting (ecs);
      return;

    case BPSTAT_WHAT_STOP_SILENT:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
      stop_print_frame = 0;

      /* Assume the thread stopped for a breakpoint.  We'll still check
	 whether a/the breakpoint is there when the thread is next
	 resumed.  */
      ecs->event_thread->stepping_over_breakpoint = 1;
      stop_waiting (ecs);
      return;

    case BPSTAT_WHAT_HP_STEP_RESUME:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");

      delete_step_resume_breakpoint (ecs->event_thread);
      if (ecs->event_thread->step_after_step_resume_breakpoint)
	{
	  /* Back when the step-resume breakpoint was inserted, we
	     were trying to single-step off a breakpoint.  Go back to
	     doing that.  */
	  ecs->event_thread->step_after_step_resume_breakpoint = 0;
	  ecs->event_thread->stepping_over_breakpoint = 1;
	  keep_going (ecs);
	  return;
	}
      break;

    case BPSTAT_WHAT_KEEP_CHECKING:
      break;
    }

  /* We come here if we hit a breakpoint but should not stop for it.
     Possibly we also were stepping and should stop for that.  So fall
     through and test for stepping.  But, if not stepping, do not
     stop.  */

  /* In all-stop mode, if we're currently stepping but have stopped in
     some other thread, we need to switch back to the stepped thread.  */
  if (switch_back_to_stepped_thread (ecs))
    return;

  if (ecs->event_thread->control.step_resume_breakpoint)
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: step-resume breakpoint is inserted\n");

      /* Having a step-resume breakpoint overrides anything
         else having to do with stepping commands until
         that breakpoint is reached.  */
      keep_going (ecs);
      return;
    }

  if (ecs->event_thread->control.step_range_end == 0)
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
      /* Likewise if we aren't even stepping.  */
      keep_going (ecs);
      return;
    }

  /* Re-fetch current thread's frame in case the code above caused
     the frame cache to be re-initialized, making our FRAME variable
     a dangling pointer.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);
  fill_in_stop_func (gdbarch, ecs);

  /* If stepping through a line, keep going if still within it.

     Note that step_range_end is the address of the first instruction
     beyond the step range, and NOT the address of the last instruction
     within it!

     Note also that during reverse execution, we may be stepping
     through a function epilogue and therefore must detect when
     the current-frame changes in the middle of a line.  */

  if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
      && (execution_direction != EXEC_REVERSE
	  || frame_id_eq (get_frame_id (frame),
			  ecs->event_thread->control.step_frame_id)))
    {
      if (debug_infrun)
	fprintf_unfiltered
	  (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
	   paddress (gdbarch, ecs->event_thread->control.step_range_start),
	   paddress (gdbarch, ecs->event_thread->control.step_range_end));

      /* Tentatively re-enable range stepping; `resume' disables it if
	 necessary (e.g., if we're stepping over a breakpoint or we
	 have software watchpoints).  */
      ecs->event_thread->control.may_range_step = 1;

      /* When stepping backward, stop at beginning of line range
	 (unless it's the function entry point, in which case
	 keep going back to the call point).  */
      if (stop_pc == ecs->event_thread->control.step_range_start
	  && stop_pc != ecs->stop_func_start
	  && execution_direction == EXEC_REVERSE)
	end_stepping_range (ecs);
      else
	keep_going (ecs);

      return;
    }

  /* We stepped out of the stepping range.  */

  /* If we are stepping at the source level and entered the runtime
     loader dynamic symbol resolution code...

     EXEC_FORWARD: we keep on single stepping until we exit the run
     time loader code and reach the callee's address.

     EXEC_REVERSE: we've already executed the callee (backward), and
     the runtime loader code is handled just like any other
     undebuggable function call.  Now we need only keep stepping
     backward through the trampoline code, and that's handled further
     down, so there is nothing for us to do here.  */

  if (execution_direction != EXEC_REVERSE
      && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
      && in_solib_dynsym_resolve_code (stop_pc))
    {
      CORE_ADDR pc_after_resolver =
	gdbarch_skip_solib_resolver (gdbarch, stop_pc);

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: stepped into dynsym resolve code\n");

      if (pc_after_resolver)
	{
	  /* Set up a step-resume breakpoint at the address
	     indicated by SKIP_SOLIB_RESOLVER.  */
	  struct symtab_and_line sr_sal;

	  init_sal (&sr_sal);
	  sr_sal.pc = pc_after_resolver;
	  sr_sal.pspace = get_frame_program_space (frame);

	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);
	}

      keep_going (ecs);
      return;
    }

  if (ecs->event_thread->control.step_range_end != 1
      && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
	  || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
      && get_frame_type (frame) == SIGTRAMP_FRAME)
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: stepped into signal trampoline\n");
      /* The inferior, while doing a "step" or "next", has ended up in
         a signal trampoline (either by a signal being delivered or by
         the signal handler returning).  Just single-step until the
         inferior leaves the trampoline (either by calling the handler
         or returning).  */
      keep_going (ecs);
      return;
    }

  /* If we're in the return path from a shared library trampoline,
     we want to proceed through the trampoline when stepping.  */
  /* macro/2012-04-25: This needs to come before the subroutine
     call check below as on some targets return trampolines look
     like subroutine calls (MIPS16 return thunks).  */
  if (gdbarch_in_solib_return_trampoline (gdbarch,
					  stop_pc, ecs->stop_func_name)
      && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
    {
      /* Determine where this trampoline returns.  */
      CORE_ADDR real_stop_pc;

      real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: stepped into solib return tramp\n");

      /* Only proceed through if we know where it's going.  */
      if (real_stop_pc)
	{
	  /* And put the step-breakpoint there and go until there.  */
	  struct symtab_and_line sr_sal;

	  init_sal (&sr_sal);	/* initialize to zeroes */
	  sr_sal.pc = real_stop_pc;
	  sr_sal.section = find_pc_overlay (sr_sal.pc);
	  sr_sal.pspace = get_frame_program_space (frame);

	  /* Do not specify what the fp should be when we stop since
	     on some machines the prologue is where the new fp value
	     is established.  */
	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);

	  /* Restart without fiddling with the step ranges or
	     other state.  */
	  keep_going (ecs);
	  return;
	}
    }

  /* Check for subroutine calls.  The check for the current frame
     equalling the step ID is not necessary - the check of the
     previous frame's ID is sufficient - but it is a common case and
     cheaper than checking the previous frame's ID.

     NOTE: frame_id_eq will never report two invalid frame IDs as
     being equal, so to get into this block, both the current and
     previous frame must have valid frame IDs.  */
  /* The outer_frame_id check is a heuristic to detect stepping
     through startup code.  If we step over an instruction which
     sets the stack pointer from an invalid value to a valid value,
     we may detect that as a subroutine call from the mythical
     "outermost" function.  This could be fixed by marking
     outermost frames as !stack_p,code_p,special_p.  Then the
     initial outermost frame, before sp was valid, would
     have code_addr == &_start.  See the comment in frame_id_eq
     for more.  */
  if (!frame_id_eq (get_stack_frame_id (frame),
		    ecs->event_thread->control.step_stack_frame_id)
      && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
		       ecs->event_thread->control.step_stack_frame_id)
	  && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
			    outer_frame_id)
	      || step_start_function != find_pc_function (stop_pc))))
    {
      CORE_ADDR real_stop_pc;

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");

      if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
	  || ((ecs->event_thread->control.step_range_end == 1)
	      && in_prologue (gdbarch, ecs->event_thread->prev_pc,
			      ecs->stop_func_start)))
	{
	  /* I presume that step_over_calls is only 0 when we're
	     supposed to be stepping at the assembly language level
	     ("stepi").  Just stop.  */
	  /* Also, maybe we just did a "nexti" inside a prolog, so we
	     thought it was a subroutine call but it was not.  Stop as
	     well.  FENN */
	  /* And this works the same backward as frontward.  MVS */
	  end_stepping_range (ecs);
	  return;
	}

      /* Reverse stepping through solib trampolines.  */

      if (execution_direction == EXEC_REVERSE
	  && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
	  && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
	      || (ecs->stop_func_start == 0
		  && in_solib_dynsym_resolve_code (stop_pc))))
	{
	  /* Any solib trampoline code can be handled in reverse
	     by simply continuing to single-step.  We have already
	     executed the solib function (backwards), and a few
	     steps will take us back through the trampoline to the
	     caller.  */
	  keep_going (ecs);
	  return;
	}

      if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
	{
	  /* We're doing a "next".

	     Normal (forward) execution: set a breakpoint at the
	     callee's return address (the address at which the caller
	     will resume).

	     Reverse (backward) execution: set the step-resume
	     breakpoint at the start of the function that we just
	     stepped into (backwards), and continue to there.  When we
	     get there, we'll need to single-step back to the caller.  */

	  if (execution_direction == EXEC_REVERSE)
	    {
	      /* If we're already at the start of the function, we've either
		 just stepped backward into a single instruction function,
		 or stepped back out of a signal handler to the first instruction
		 of the function.  Just keep going, which will single-step back
		 to the caller.  */
	      if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
		{
		  struct symtab_and_line sr_sal;

		  /* Normal function call return (static or dynamic).  */
		  init_sal (&sr_sal);
		  sr_sal.pc = ecs->stop_func_start;
		  sr_sal.pspace = get_frame_program_space (frame);
		  insert_step_resume_breakpoint_at_sal (gdbarch,
							sr_sal, null_frame_id);
		}
	    }
	  else
	    insert_step_resume_breakpoint_at_caller (frame);

	  keep_going (ecs);
	  return;
	}

      /* If we are in a function call trampoline (a stub between the
         calling routine and the real function), locate the real
         function.  That's what tells us (a) whether we want to step
         into it at all, and (b) what prologue we want to run to the
         end of, if we do step into it.  */
      real_stop_pc = skip_language_trampoline (frame, stop_pc);
      if (real_stop_pc == 0)
	real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
      if (real_stop_pc != 0)
	ecs->stop_func_start = real_stop_pc;

      if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
	{
	  struct symtab_and_line sr_sal;

	  init_sal (&sr_sal);
	  sr_sal.pc = ecs->stop_func_start;
	  sr_sal.pspace = get_frame_program_space (frame);

	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);
	  keep_going (ecs);
	  return;
	}

      /* If we have line number information for the function we are
         thinking of stepping into and the function isn't on the skip
         list, step into it.

         If there are several symtabs at that PC (e.g. with include
         files), just want to know whether *any* of them have line
         numbers.  find_pc_line handles this.  */
      {
	struct symtab_and_line tmp_sal;

	tmp_sal = find_pc_line (ecs->stop_func_start, 0);
	if (tmp_sal.line != 0
	    && !function_name_is_marked_for_skip (ecs->stop_func_name,
						  &tmp_sal))
	  {
	    if (execution_direction == EXEC_REVERSE)
	      handle_step_into_function_backward (gdbarch, ecs);
	    else
	      handle_step_into_function (gdbarch, ecs);
	    return;
	  }
      }

      /* If we have no line number and the step-stop-if-no-debug is
         set, we stop the step so that the user has a chance to switch
         in assembly mode.  */
      if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
	  && step_stop_if_no_debug)
	{
	  end_stepping_range (ecs);
	  return;
	}

      if (execution_direction == EXEC_REVERSE)
	{
	  /* If we're already at the start of the function, we've either just
	     stepped backward into a single instruction function without line
	     number info, or stepped back out of a signal handler to the first
	     instruction of the function without line number info.  Just keep
	     going, which will single-step back to the caller.  */
	  if (ecs->stop_func_start != stop_pc)
	    {
	      /* Set a breakpoint at callee's start address.
		 From there we can step once and be back in the caller.  */
	      struct symtab_and_line sr_sal;

	      init_sal (&sr_sal);
	      sr_sal.pc = ecs->stop_func_start;
	      sr_sal.pspace = get_frame_program_space (frame);
	      insert_step_resume_breakpoint_at_sal (gdbarch,
						    sr_sal, null_frame_id);
	    }
	}
      else
	/* Set a breakpoint at callee's return address (the address
	   at which the caller will resume).  */
	insert_step_resume_breakpoint_at_caller (frame);

      keep_going (ecs);
      return;
    }

  /* Reverse stepping through solib trampolines.  */

  if (execution_direction == EXEC_REVERSE
      && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
    {
      if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
	  || (ecs->stop_func_start == 0
	      && in_solib_dynsym_resolve_code (stop_pc)))
	{
	  /* Any solib trampoline code can be handled in reverse
	     by simply continuing to single-step.  We have already
	     executed the solib function (backwards), and a few
	     steps will take us back through the trampoline to the
	     caller.  */
	  keep_going (ecs);
	  return;
	}
      else if (in_solib_dynsym_resolve_code (stop_pc))
	{
	  /* Stepped backward into the solib dynsym resolver.
	     Set a breakpoint at its start and continue, then
	     one more step will take us out.  */
	  struct symtab_and_line sr_sal;

	  init_sal (&sr_sal);
	  sr_sal.pc = ecs->stop_func_start;
	  sr_sal.pspace = get_frame_program_space (frame);
	  insert_step_resume_breakpoint_at_sal (gdbarch,
						sr_sal, null_frame_id);
	  keep_going (ecs);
	  return;
	}
    }

  stop_pc_sal = find_pc_line (stop_pc, 0);

  /* NOTE: tausq/2004-05-24: This if block used to be done before all
     the trampoline processing logic, however, there are some trampolines
     that have no names, so we should do trampoline handling first.  */
  if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
      && ecs->stop_func_name == NULL
      && stop_pc_sal.line == 0)
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: stepped into undebuggable function\n");

      /* The inferior just stepped into, or returned to, an
         undebuggable function (where there is no debugging information
         and no line number corresponding to the address where the
         inferior stopped).  Since we want to skip this kind of code,
         we keep going until the inferior returns from this
         function - unless the user has asked us not to (via
         set step-mode) or we no longer know how to get back
         to the call site.  */
      if (step_stop_if_no_debug
	  || !frame_id_p (frame_unwind_caller_id (frame)))
	{
	  /* If we have no line number and the step-stop-if-no-debug
	     is set, we stop the step so that the user has a chance to
	     switch in assembly mode.  */
	  end_stepping_range (ecs);
	  return;
	}
      else
	{
	  /* Set a breakpoint at callee's return address (the address
	     at which the caller will resume).  */
	  insert_step_resume_breakpoint_at_caller (frame);
	  keep_going (ecs);
	  return;
	}
    }

  if (ecs->event_thread->control.step_range_end == 1)
    {
      /* It is stepi or nexti.  We always want to stop stepping after
         one instruction.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
      end_stepping_range (ecs);
      return;
    }

  if (stop_pc_sal.line == 0)
    {
      /* We have no line number information.  That means to stop
         stepping (does this always happen right after one instruction,
         when we do "s" in a function with no line numbers,
         or can this happen as a result of a return or longjmp?).  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
      end_stepping_range (ecs);
      return;
    }

  /* Look for "calls" to inlined functions, part one.  If the inline
     frame machinery detected some skipped call sites, we have entered
     a new inline function.  */

  if (frame_id_eq (get_frame_id (get_current_frame ()),
		   ecs->event_thread->control.step_frame_id)
      && inline_skipped_frames (ecs->ptid))
    {
      struct symtab_and_line call_sal;

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: stepped into inlined function\n");

      find_frame_sal (get_current_frame (), &call_sal);

      if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
	{
	  /* For "step", we're going to stop.  But if the call site
	     for this inlined function is on the same source line as
	     we were previously stepping, go down into the function
	     first.  Otherwise stop at the call site.  */

	  if (call_sal.line == ecs->event_thread->current_line
	      && call_sal.symtab == ecs->event_thread->current_symtab)
	    step_into_inline_frame (ecs->ptid);

	  end_stepping_range (ecs);
	  return;
	}
      else
	{
	  /* For "next", we should stop at the call site if it is on a
	     different source line.  Otherwise continue through the
	     inlined function.  */
	  if (call_sal.line == ecs->event_thread->current_line
	      && call_sal.symtab == ecs->event_thread->current_symtab)
	    keep_going (ecs);
	  else
	    end_stepping_range (ecs);
	  return;
	}
    }

  /* Look for "calls" to inlined functions, part two.  If we are still
     in the same real function we were stepping through, but we have
     to go further up to find the exact frame ID, we are stepping
     through a more inlined call beyond its call site.  */

  if (get_frame_type (get_current_frame ()) == INLINE_FRAME
      && !frame_id_eq (get_frame_id (get_current_frame ()),
		       ecs->event_thread->control.step_frame_id)
      && stepped_in_from (get_current_frame (),
			  ecs->event_thread->control.step_frame_id))
    {
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: stepping through inlined function\n");

      if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
	keep_going (ecs);
      else
	end_stepping_range (ecs);
      return;
    }

  if ((stop_pc == stop_pc_sal.pc)
      && (ecs->event_thread->current_line != stop_pc_sal.line
	  || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
    {
      /* We are at the start of a different line.  So stop.  Note that
         we don't stop if we step into the middle of a different line.
         That is said to make things like for (;;) statements work
         better.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: stepped to a different line\n");
      end_stepping_range (ecs);
      return;
    }

  /* We aren't done stepping.

     Optimize by setting the stepping range to the line.
     (We might not be in the original line, but if we entered a
     new line in mid-statement, we continue stepping.  This makes
     things like for(;;) statements work better.)  */

  ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
  ecs->event_thread->control.step_range_end = stop_pc_sal.end;
  ecs->event_thread->control.may_range_step = 1;
  set_step_info (frame, stop_pc_sal);

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
  keep_going (ecs);
}
5313
5314 /* In all-stop mode, if we're currently stepping but have stopped in
5315 some other thread, we may need to switch back to the stepped
5316 thread. Returns true we set the inferior running, false if we left
5317 it stopped (and the event needs further processing). */
5318
5319 static int
5320 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5321 {
5322 if (!non_stop)
5323 {
5324 struct thread_info *tp;
5325 struct thread_info *stepping_thread;
5326 struct thread_info *step_over;
5327
5328 /* If any thread is blocked on some internal breakpoint, and we
5329 simply need to step over that breakpoint to get it going
5330 again, do that first. */
5331
5332 /* However, if we see an event for the stepping thread, then we
5333 know all other threads have been moved past their breakpoints
5334 already. Let the caller check whether the step is finished,
5335 etc., before deciding to move it past a breakpoint. */
5336 if (ecs->event_thread->control.step_range_end != 0)
5337 return 0;
5338
5339 /* Check if the current thread is blocked on an incomplete
5340 step-over, interrupted by a random signal. */
5341 if (ecs->event_thread->control.trap_expected
5342 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5343 {
5344 if (debug_infrun)
5345 {
5346 fprintf_unfiltered (gdb_stdlog,
5347 "infrun: need to finish step-over of [%s]\n",
5348 target_pid_to_str (ecs->event_thread->ptid));
5349 }
5350 keep_going (ecs);
5351 return 1;
5352 }
5353
5354 /* Check if the current thread is blocked by a single-step
5355 breakpoint of another thread. */
5356 if (ecs->hit_singlestep_breakpoint)
5357 {
5358 if (debug_infrun)
5359 {
5360 fprintf_unfiltered (gdb_stdlog,
5361 "infrun: need to step [%s] over single-step "
5362 "breakpoint\n",
5363 target_pid_to_str (ecs->ptid));
5364 }
5365 keep_going (ecs);
5366 return 1;
5367 }
5368
5369 /* Otherwise, we no longer expect a trap in the current thread.
5370 Clear the trap_expected flag before switching back -- this is
5371 what keep_going does as well, if we call it. */
5372 ecs->event_thread->control.trap_expected = 0;
5373
5374 /* Likewise, clear the signal if it should not be passed. */
5375 if (!signal_program[ecs->event_thread->suspend.stop_signal])
5376 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5377
5378 /* If scheduler locking applies even if not stepping, there's no
5379 need to walk over threads. Above we've checked whether the
5380 current thread is stepping. If some other thread not the
5381 event thread is stepping, then it must be that scheduler
5382 locking is not in effect. */
5383 if (schedlock_applies (0))
5384 return 0;
5385
5386 /* Look for the stepping/nexting thread, and check if any other
5387 thread other than the stepping thread needs to start a
5388 step-over. Do all step-overs before actually proceeding with
5389 step/next/etc. */
5390 stepping_thread = NULL;
5391 step_over = NULL;
5392 ALL_NON_EXITED_THREADS (tp)
5393 {
5394 /* Ignore threads of processes we're not resuming. */
5395 if (!sched_multi
5396 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5397 continue;
5398
5399 /* When stepping over a breakpoint, we lock all threads
5400 except the one that needs to move past the breakpoint.
5401 If a non-event thread has this set, the "incomplete
5402 step-over" check above should have caught it earlier. */
5403 gdb_assert (!tp->control.trap_expected);
5404
5405 /* Did we find the stepping thread? */
5406 if (tp->control.step_range_end)
5407 {
5408 /* Yep. There should only one though. */
5409 gdb_assert (stepping_thread == NULL);
5410
5411 /* The event thread is handled at the top, before we
5412 enter this loop. */
5413 gdb_assert (tp != ecs->event_thread);
5414
5415 /* If some thread other than the event thread is
5416 stepping, then scheduler locking can't be in effect,
5417 otherwise we wouldn't have resumed the current event
5418 thread in the first place. */
5419 gdb_assert (!schedlock_applies (1));
5420
5421 stepping_thread = tp;
5422 }
5423 else if (thread_still_needs_step_over (tp))
5424 {
5425 step_over = tp;
5426
5427 /* At the top we've returned early if the event thread
5428 is stepping. If some other thread not the event
5429 thread is stepping, then scheduler locking can't be
5430 in effect, and we can resume this thread. No need to
5431 keep looking for the stepping thread then. */
5432 break;
5433 }
5434 }
5435
5436 if (step_over != NULL)
5437 {
5438 tp = step_over;
5439 if (debug_infrun)
5440 {
5441 fprintf_unfiltered (gdb_stdlog,
5442 "infrun: need to step-over [%s]\n",
5443 target_pid_to_str (tp->ptid));
5444 }
5445
5446 /* Only the stepping thread should have this set. */
5447 gdb_assert (tp->control.step_range_end == 0);
5448
5449 ecs->ptid = tp->ptid;
5450 ecs->event_thread = tp;
5451 switch_to_thread (ecs->ptid);
5452 keep_going (ecs);
5453 return 1;
5454 }
5455
5456 if (stepping_thread != NULL)
5457 {
5458 struct frame_info *frame;
5459 struct gdbarch *gdbarch;
5460
5461 tp = stepping_thread;
5462
5463 /* If the stepping thread exited, then don't try to switch
5464 back and resume it, which could fail in several different
5465 ways depending on the target. Instead, just keep going.
5466
5467 We can find a stepping dead thread in the thread list in
5468 two cases:
5469
5470 - The target supports thread exit events, and when the
5471 target tries to delete the thread from the thread list,
5472 inferior_ptid pointed at the exiting thread. In such
5473 case, calling delete_thread does not really remove the
5474 thread from the list; instead, the thread is left listed,
5475 with 'exited' state.
5476
5477 - The target's debug interface does not support thread
5478 exit events, and so we have no idea whatsoever if the
5479 previously stepping thread is still alive. For that
5480 reason, we need to synchronously query the target
5481 now. */
5482 if (is_exited (tp->ptid)
5483 || !target_thread_alive (tp->ptid))
5484 {
5485 if (debug_infrun)
5486 fprintf_unfiltered (gdb_stdlog,
5487 "infrun: not switching back to "
5488 "stepped thread, it has vanished\n");
5489
5490 delete_thread (tp->ptid);
5491 keep_going (ecs);
5492 return 1;
5493 }
5494
5495 if (debug_infrun)
5496 fprintf_unfiltered (gdb_stdlog,
5497 "infrun: switching back to stepped thread\n");
5498
5499 ecs->event_thread = tp;
5500 ecs->ptid = tp->ptid;
5501 context_switch (ecs->ptid);
5502
5503 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5504 frame = get_current_frame ();
5505 gdbarch = get_frame_arch (frame);
5506
5507 /* If the PC of the thread we were trying to single-step has
5508 changed, then that thread has trapped or been signaled,
5509 but the event has not been reported to GDB yet. Re-poll
5510 the target looking for this particular thread's event
5511 (i.e. temporarily enable schedlock) by:
5512
5513 - setting a break at the current PC
5514 - resuming that particular thread, only (by setting
5515 trap expected)
5516
5517 This prevents us continuously moving the single-step
5518 breakpoint forward, one instruction at a time,
5519 overstepping. */
5520
5521 if (gdbarch_software_single_step_p (gdbarch)
5522 && stop_pc != tp->prev_pc)
5523 {
5524 if (debug_infrun)
5525 fprintf_unfiltered (gdb_stdlog,
5526 "infrun: expected thread advanced also\n");
5527
5528 insert_single_step_breakpoint (get_frame_arch (frame),
5529 get_frame_address_space (frame),
5530 stop_pc);
5531 singlestep_breakpoints_inserted_p = 1;
5532 ecs->event_thread->control.trap_expected = 1;
5533 singlestep_ptid = inferior_ptid;
5534 singlestep_pc = stop_pc;
5535
5536 resume (0, GDB_SIGNAL_0);
5537 prepare_to_wait (ecs);
5538 }
5539 else
5540 {
5541 if (debug_infrun)
5542 fprintf_unfiltered (gdb_stdlog,
5543 "infrun: expected thread still "
5544 "hasn't advanced\n");
5545 keep_going (ecs);
5546 }
5547
5548 return 1;
5549 }
5550 }
5551 return 0;
5552 }
5553
5554 /* Is thread TP in the middle of single-stepping? */
5555
5556 static int
5557 currently_stepping (struct thread_info *tp)
5558 {
5559 return ((tp->control.step_range_end
5560 && tp->control.step_resume_breakpoint == NULL)
5561 || tp->control.trap_expected
5562 || bpstat_should_step ());
5563 }
5564
/* Inferior has stepped into a subroutine call with source code that
   we should not step over.  Do step to the first line of code in
   it.  */

static void
handle_step_into_function (struct gdbarch *gdbarch,
			   struct execution_control_state *ecs)
{
  struct symtab *s;
  struct symtab_and_line stop_func_sal, sr_sal;

  /* Fill in ecs->stop_func_start/end for the function at stop_pc.  */
  fill_in_stop_func (gdbarch, ecs);

  /* Skip the callee's prologue, unless it's written in assembler,
     where "skip the prologue" has no useful meaning.  */
  s = find_pc_symtab (stop_pc);
  if (s && s->language != language_asm)
    ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
						  ecs->stop_func_start);

  stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
  /* Use the step_resume_break to step until the end of the prologue,
     even if that involves jumps (as it seems to on the vax under
     4.2).  */
  /* If the prologue ends in the middle of a source line, continue to
     the end of that source line (if it is still within the function).
     Otherwise, just go to end of prologue.  */
  if (stop_func_sal.end
      && stop_func_sal.pc != ecs->stop_func_start
      && stop_func_sal.end < ecs->stop_func_end)
    ecs->stop_func_start = stop_func_sal.end;

  /* Architectures which require breakpoint adjustment might not be able
     to place a breakpoint at the computed address.  If so, the test
     ``ecs->stop_func_start == stop_pc'' will never succeed.  Adjust
     ecs->stop_func_start to an address at which a breakpoint may be
     legitimately placed.

     Note:  kevinb/2004-01-19:  On FR-V, if this adjustment is not
     made, GDB will enter an infinite loop when stepping through
     optimized code consisting of VLIW instructions which contain
     subinstructions corresponding to different source lines.  On
     FR-V, it's not permitted to place a breakpoint on any but the
     first subinstruction of a VLIW instruction.  When a breakpoint is
     set, GDB will adjust the breakpoint address to the beginning of
     the VLIW instruction.  Thus, we need to make the corresponding
     adjustment here when computing the stop address.  */

  if (gdbarch_adjust_breakpoint_address_p (gdbarch))
    {
      ecs->stop_func_start
	= gdbarch_adjust_breakpoint_address (gdbarch,
					     ecs->stop_func_start);
    }

  if (ecs->stop_func_start == stop_pc)
    {
      /* We are already there: stop now.  */
      end_stepping_range (ecs);
      return;
    }
  else
    {
      /* Put the step-breakpoint there and go until there.  */
      init_sal (&sr_sal);	/* initialize to zeroes */
      sr_sal.pc = ecs->stop_func_start;
      sr_sal.section = find_pc_overlay (ecs->stop_func_start);
      sr_sal.pspace = get_frame_program_space (get_current_frame ());

      /* Do not specify what the fp should be when we stop since on
	 some machines the prologue is where the new fp value is
	 established.  */
      insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

      /* And make sure stepping stops right away then.  */
      ecs->event_thread->control.step_range_end
	= ecs->event_thread->control.step_range_start;
    }
  keep_going (ecs);
}
5643
5644 /* Inferior has stepped backward into a subroutine call with source
5645 code that we should not step over. Do step to the beginning of the
5646 last line of code in it. */
5647
5648 static void
5649 handle_step_into_function_backward (struct gdbarch *gdbarch,
5650 struct execution_control_state *ecs)
5651 {
5652 struct symtab *s;
5653 struct symtab_and_line stop_func_sal;
5654
5655 fill_in_stop_func (gdbarch, ecs);
5656
5657 s = find_pc_symtab (stop_pc);
5658 if (s && s->language != language_asm)
5659 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5660 ecs->stop_func_start);
5661
5662 stop_func_sal = find_pc_line (stop_pc, 0);
5663
5664 /* OK, we're just going to keep stepping here. */
5665 if (stop_func_sal.pc == stop_pc)
5666 {
5667 /* We're there already. Just stop stepping now. */
5668 end_stepping_range (ecs);
5669 }
5670 else
5671 {
5672 /* Else just reset the step range and keep going.
5673 No step-resume breakpoint, they don't work for
5674 epilogues, which can have multiple entry paths. */
5675 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5676 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5677 keep_going (ecs);
5678 }
5679 return;
5680 }
5681
5682 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5683 This is used to both functions and to skip over code. */
5684
5685 static void
5686 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5687 struct symtab_and_line sr_sal,
5688 struct frame_id sr_id,
5689 enum bptype sr_type)
5690 {
5691 /* There should never be more than one step-resume or longjmp-resume
5692 breakpoint per thread, so we should never be setting a new
5693 step_resume_breakpoint when one is already active. */
5694 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5695 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5696
5697 if (debug_infrun)
5698 fprintf_unfiltered (gdb_stdlog,
5699 "infrun: inserting step-resume breakpoint at %s\n",
5700 paddress (gdbarch, sr_sal.pc));
5701
5702 inferior_thread ()->control.step_resume_breakpoint
5703 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5704 }
5705
5706 void
5707 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5708 struct symtab_and_line sr_sal,
5709 struct frame_id sr_id)
5710 {
5711 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5712 sr_sal, sr_id,
5713 bp_step_resume);
5714 }
5715
5716 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5717 This is used to skip a potential signal handler.
5718
5719 This is called with the interrupted function's frame. The signal
5720 handler, when it returns, will resume the interrupted function at
5721 RETURN_FRAME.pc. */
5722
5723 static void
5724 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5725 {
5726 struct symtab_and_line sr_sal;
5727 struct gdbarch *gdbarch;
5728
5729 gdb_assert (return_frame != NULL);
5730 init_sal (&sr_sal); /* initialize to zeros */
5731
5732 gdbarch = get_frame_arch (return_frame);
5733 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5734 sr_sal.section = find_pc_overlay (sr_sal.pc);
5735 sr_sal.pspace = get_frame_program_space (return_frame);
5736
5737 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5738 get_stack_frame_id (return_frame),
5739 bp_hp_step_resume);
5740 }
5741
5742 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5743 is used to skip a function after stepping into it (for "next" or if
5744 the called function has no debugging information).
5745
5746 The current function has almost always been reached by single
5747 stepping a call or return instruction. NEXT_FRAME belongs to the
5748 current function, and the breakpoint will be set at the caller's
5749 resume address.
5750
5751 This is a separate function rather than reusing
5752 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5753 get_prev_frame, which may stop prematurely (see the implementation
5754 of frame_unwind_caller_id for an example). */
5755
5756 static void
5757 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5758 {
5759 struct symtab_and_line sr_sal;
5760 struct gdbarch *gdbarch;
5761
5762 /* We shouldn't have gotten here if we don't know where the call site
5763 is. */
5764 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5765
5766 init_sal (&sr_sal); /* initialize to zeros */
5767
5768 gdbarch = frame_unwind_caller_arch (next_frame);
5769 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5770 frame_unwind_caller_pc (next_frame));
5771 sr_sal.section = find_pc_overlay (sr_sal.pc);
5772 sr_sal.pspace = frame_unwind_program_space (next_frame);
5773
5774 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5775 frame_unwind_caller_id (next_frame));
5776 }
5777
5778 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5779 new breakpoint at the target of a jmp_buf. The handling of
5780 longjmp-resume uses the same mechanisms used for handling
5781 "step-resume" breakpoints. */
5782
5783 static void
5784 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5785 {
5786 /* There should never be more than one longjmp-resume breakpoint per
5787 thread, so we should never be setting a new
5788 longjmp_resume_breakpoint when one is already active. */
5789 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5790
5791 if (debug_infrun)
5792 fprintf_unfiltered (gdb_stdlog,
5793 "infrun: inserting longjmp-resume breakpoint at %s\n",
5794 paddress (gdbarch, pc));
5795
5796 inferior_thread ()->control.exception_resume_breakpoint =
5797 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5798 }
5799
/* Insert an exception resume breakpoint.  TP is the thread throwing
   the exception.  The block B is the block of the unwinder debug hook
   function.  FRAME is the frame corresponding to the call to this
   function.  SYM is the symbol of the function argument holding the
   target PC of the exception.  */

static void
insert_exception_resume_breakpoint (struct thread_info *tp,
				    const struct block *b,
				    struct frame_info *frame,
				    struct symbol *sym)
{
  volatile struct gdb_exception e;

  /* We want to ignore errors here.  Symbol lookup or reading the
     variable may throw; if so, we simply don't set the breakpoint.  */
  TRY_CATCH (e, RETURN_MASK_ERROR)
    {
      struct symbol *vsym;
      struct value *value;
      CORE_ADDR handler;
      struct breakpoint *bp;

      /* Re-look up SYM by linkage name within B so that we read the
	 argument as seen in this particular frame.  */
      vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
      value = read_var_value (vsym, frame);
      /* If the value was optimized out, revert to the old behavior.  */
      if (! value_optimized_out (value))
	{
	  handler = value_as_address (value);

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: exception resume at %lx\n",
				(unsigned long) handler);

	  /* Momentary breakpoint at the handler's PC; restricted to
	     TP via bp->thread below.  */
	  bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
					       handler, bp_exception_resume);

	  /* set_momentary_breakpoint_at_pc invalidates FRAME.  */
	  frame = NULL;

	  bp->thread = tp->num;
	  /* NOTE(review): the breakpoint is recorded on the *current*
	     thread (inferior_thread ()) rather than on TP; the caller
	     passes the event thread as TP, which is presumably the
	     current thread here — confirm they always coincide.  */
	  inferior_thread ()->control.exception_resume_breakpoint = bp;
	}
    }
}
5845
5846 /* A helper for check_exception_resume that sets an
5847 exception-breakpoint based on a SystemTap probe. */
5848
5849 static void
5850 insert_exception_resume_from_probe (struct thread_info *tp,
5851 const struct bound_probe *probe,
5852 struct frame_info *frame)
5853 {
5854 struct value *arg_value;
5855 CORE_ADDR handler;
5856 struct breakpoint *bp;
5857
5858 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5859 if (!arg_value)
5860 return;
5861
5862 handler = value_as_address (arg_value);
5863
5864 if (debug_infrun)
5865 fprintf_unfiltered (gdb_stdlog,
5866 "infrun: exception resume at %s\n",
5867 paddress (get_objfile_arch (probe->objfile),
5868 handler));
5869
5870 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5871 handler, bp_exception_resume);
5872 bp->thread = tp->num;
5873 inferior_thread ()->control.exception_resume_breakpoint = bp;
5874 }
5875
/* This is called when an exception has been intercepted.  Check to
   see whether the exception's destination is of interest, and if so,
   set an exception resume breakpoint there.  */

static void
check_exception_resume (struct execution_control_state *ecs,
			struct frame_info *frame)
{
  volatile struct gdb_exception e;
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.probe)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  /* No probe: fall back to inspecting the debug hook's arguments,
     which requires debug info for the function.  */
  func = get_frame_function (frame);
  if (!func)
    return;

  /* Errors during symbol iteration or breakpoint insertion are
     deliberately swallowed; failing to set the resume breakpoint is
     not fatal.  */
  TRY_CATCH (e, RETURN_MASK_ERROR)
    {
      const struct block *b;
      struct block_iterator iter;
      struct symbol *sym;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
	 the unwinder's debug hook, declared as:

	 void _Unwind_DebugHook (void *cfa, void *handler);

	 The CFA argument indicates the frame to which control is
	 about to be transferred.  HANDLER is the destination PC.

	 We ignore the CFA and set a temporary breakpoint at HANDLER.
	 This is not extremely efficient but it avoids issues in gdb
	 with computing the DWARF CFA, and it also works even in weird
	 cases such as throwing an exception from inside a signal
	 handler.  */

      b = SYMBOL_BLOCK_VALUE (func);
      /* Walk the block's symbols counting formal arguments: skip the
	 first argument (the CFA) and use the second (the handler).  */
      ALL_BLOCK_SYMBOLS (b, iter, sym)
	{
	  if (!SYMBOL_IS_ARGUMENT (sym))
	    continue;

	  if (argno == 0)
	    ++argno;
	  else
	    {
	      insert_exception_resume_breakpoint (ecs->event_thread,
						  b, frame, sym);
	      break;
	    }
	}
    }
}
5941
/* Record that the current event should cause a user-visible stop:
   discard any pending step-over state and clear ECS->wait_some_more
   so callers stop waiting for further events from the inferior.  */

static void
stop_waiting (struct execution_control_state *ecs)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");

  /* A stop cancels any in-progress step-over bookkeeping.  */
  clear_step_over_info ();

  /* Let callers know we don't want to wait for the inferior anymore.  */
  ecs->wait_some_more = 0;
}
5953
/* Called when we should continue running the inferior, because the
   current event doesn't cause a user visible stop.  This does the
   resuming part; waiting for the next event is done elsewhere.  */

static void
keep_going (struct execution_control_state *ecs)
{
  /* Make sure normal_stop is called if we get a QUIT handled before
     reaching resume.  */
  struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc (get_thread_regcache (ecs->ptid));

  if (ecs->event_thread->control.trap_expected
      && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
    {
      /* We haven't yet gotten our trap, and either: intercepted a
	 non-signal event (e.g., a fork); or took a signal which we
	 are supposed to pass through to the inferior.  Simply
	 continue.  */
      discard_cleanups (old_cleanups);
      resume (currently_stepping (ecs->event_thread),
	      ecs->event_thread->suspend.stop_signal);
    }
  else
    {
      volatile struct gdb_exception e;
      struct regcache *regcache = get_current_regcache ();
      int remove_bp;
      int remove_wps;

      /* Either the trap was not expected, but we are continuing
	 anyway (if we got a signal, the user asked it be passed to
	 the child)
	 -- or --
	 We got our expected trap, but decided we should resume from
	 it.

	 We're going to run this baby now!

	 Note that insert_breakpoints won't try to re-insert
	 already inserted breakpoints.  Therefore, we don't
	 care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
	 displaced stepping to do so, insert all breakpoints
	 (watchpoints, etc.) but the one we're stepping over, step one
	 instruction, and then re-insert the breakpoint when that step
	 is finished.  */

      /* Does a breakpoint (or a hit software single-step breakpoint)
	 need to be stepped over?  */
      remove_bp = (ecs->hit_singlestep_breakpoint
		   || thread_still_needs_step_over (ecs->event_thread));
      /* Does a watchpoint need to be stepped over (only needed when
	 the target can't step over it itself)?  */
      remove_wps = (ecs->event_thread->stepping_over_watchpoint
		    && !target_have_steppable_watchpoint);

      if (remove_bp && !use_displaced_stepping (get_regcache_arch (regcache)))
	{
	  set_step_over_info (get_regcache_aspace (regcache),
			      regcache_read_pc (regcache), remove_wps);
	}
      else if (remove_wps)
	set_step_over_info (NULL, 0, remove_wps);
      else
	clear_step_over_info ();

      /* Stop stepping if inserting breakpoints fails.  */
      TRY_CATCH (e, RETURN_MASK_ERROR)
	{
	  insert_breakpoints ();
	}
      if (e.reason < 0)
	{
	  exception_print (gdb_stderr, e);
	  stop_waiting (ecs);
	  return;
	}

      /* If we're stepping over anything, expect (and swallow) the
	 resulting trap.  */
      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      /* Do not deliver GDB_SIGNAL_TRAP (except when the user
	 explicitly specifies that such a signal should be delivered
	 to the target program).  Typically, that would occur when a
	 user is debugging a target monitor on a simulator: the target
	 monitor sets a breakpoint; the simulator encounters this
	 breakpoint and halts the simulation handing control to GDB;
	 GDB, noting that the stop address doesn't map to any known
	 breakpoint, returns control back to the simulator; the
	 simulator then delivers the hardware equivalent of a
	 GDB_SIGNAL_TRAP to the program being debugged.  */
      if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
	  && !signal_program[ecs->event_thread->suspend.stop_signal])
	ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

      discard_cleanups (old_cleanups);
      resume (currently_stepping (ecs->event_thread),
	      ecs->event_thread->suspend.stop_signal);
    }

  prepare_to_wait (ecs);
}
6056
/* This function normally comes after a resume, before
   handle_inferior_event exits.  It takes care of any last bits of
   housekeeping, and sets the all-important wait_some_more flag.  */

static void
prepare_to_wait (struct execution_control_state *ecs)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");

  /* This is the old end of the while loop.  Let everybody know we
     want to wait for the inferior some more and get called again
     soon.  */
  ecs->wait_some_more = 1;
}
6072
6073 /* We are done with the step range of a step/next/si/ni command.
6074 Called once for each n of a "step n" operation. */
6075
6076 static void
6077 end_stepping_range (struct execution_control_state *ecs)
6078 {
6079 ecs->event_thread->control.stop_step = 1;
6080 stop_waiting (ecs);
6081 }
6082
/* Several print_*_reason functions to print why the inferior has stopped.
   We always print something when the inferior exits, or receives a signal.
   The rest of the cases are dealt with later on in normal_stop and
   print_it_typical.  Ideally there should be a call to one of these
   print_*_reason functions from handle_inferior_event each time
   stop_waiting is called.

   Note that we don't call these directly, instead we delegate that to
   the interpreters, through observers.  Interpreters then call these
   with whatever uiout is right.  */
6093
6094 void
6095 print_end_stepping_range_reason (struct ui_out *uiout)
6096 {
6097 /* For CLI-like interpreters, print nothing. */
6098
6099 if (ui_out_is_mi_like_p (uiout))
6100 {
6101 ui_out_field_string (uiout, "reason",
6102 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
6103 }
6104 }
6105
/* The inferior was terminated by signal SIGGNAL.  Emit the
   "Program terminated with signal ..." message, with annotations for
   annotation-level frontends and a "reason" field for MI-like
   interpreters.  */

void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  /* MI gets a machine-readable reason in addition to the text.  */
  if (ui_out_is_mi_like_p (uiout))
    ui_out_field_string
      (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  ui_out_text (uiout, "\nProgram terminated with signal ");
  annotate_signal_name ();
  ui_out_field_string (uiout, "signal-name",
		       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  ui_out_text (uiout, ", ");
  annotate_signal_string ();
  ui_out_field_string (uiout, "signal-meaning",
		       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  ui_out_text (uiout, ".\n");
  ui_out_text (uiout, "The program no longer exists.\n");
}
6126
6127 void
6128 print_exited_reason (struct ui_out *uiout, int exitstatus)
6129 {
6130 struct inferior *inf = current_inferior ();
6131 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
6132
6133 annotate_exited (exitstatus);
6134 if (exitstatus)
6135 {
6136 if (ui_out_is_mi_like_p (uiout))
6137 ui_out_field_string (uiout, "reason",
6138 async_reason_lookup (EXEC_ASYNC_EXITED));
6139 ui_out_text (uiout, "[Inferior ");
6140 ui_out_text (uiout, plongest (inf->num));
6141 ui_out_text (uiout, " (");
6142 ui_out_text (uiout, pidstr);
6143 ui_out_text (uiout, ") exited with code ");
6144 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
6145 ui_out_text (uiout, "]\n");
6146 }
6147 else
6148 {
6149 if (ui_out_is_mi_like_p (uiout))
6150 ui_out_field_string
6151 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6152 ui_out_text (uiout, "[Inferior ");
6153 ui_out_text (uiout, plongest (inf->num));
6154 ui_out_text (uiout, " (");
6155 ui_out_text (uiout, pidstr);
6156 ui_out_text (uiout, ") exited normally]\n");
6157 }
6158 }
6159
/* The inferior received signal SIGGNAL (or stopped with no signal
   when SIGGNAL is GDB_SIGNAL_0).  Print the appropriate message, with
   annotations and an MI "reason" field where applicable.  */

void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signal ();

  if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
    {
      /* No actual signal: a CLI-only "[thread] #N stopped"
	 notification.  */
      struct thread_info *t = inferior_thread ();

      ui_out_text (uiout, "\n[");
      ui_out_field_string (uiout, "thread-name",
			   target_pid_to_str (t->ptid));
      ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
      ui_out_text (uiout, " stopped");
    }
  else
    {
      ui_out_text (uiout, "\nProgram received signal ");
      annotate_signal_name ();
      /* MI additionally gets a machine-readable reason field.  */
      if (ui_out_is_mi_like_p (uiout))
	ui_out_field_string
	  (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      ui_out_field_string (uiout, "signal-name",
			   gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      ui_out_text (uiout, ", ");
      annotate_signal_string ();
      ui_out_field_string (uiout, "signal-meaning",
			   gdb_signal_to_string (siggnal));
      annotate_signal_string_end ();
    }
  ui_out_text (uiout, ".\n");
}
6193
/* Reverse execution ran out of recorded history.  Print the
   "No more reverse-execution history." message.  */

void
print_no_history_reason (struct ui_out *uiout)
{
  ui_out_text (uiout, "\nNo more reverse-execution history.\n");
}
6199
/* Print current location without a level number, if we have changed
   functions or hit a breakpoint.  Print source line if we have one.
   bpstat_print contains the logic deciding in detail what to print,
   based on the event(s) that just occurred.  */

void
print_stop_event (struct target_waitstatus *ws)
{
  int bpstat_ret;
  int source_flag;
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  /* Let the bpstat machinery decide what (if anything) it printed,
     then choose what location/source output to add.  */
  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
	 should) carry around the function and does (or should) use
	 that when doing a frame comparison.  */
      if (tp->control.stop_step
	  && frame_id_eq (tp->control.step_frame_id,
			  get_frame_id (get_current_frame ()))
	  && step_start_function == find_pc_function (stop_pc))
	{
	  /* Finished step, just print source line.  */
	  source_flag = SRC_LINE;
	}
      else
	{
	  /* Print location and source line.  */
	  source_flag = SRC_AND_LOC;
	}
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (__FILE__, __LINE__, _("Unknown value."));
    }

  /* The behavior of this routine with respect to the source
     flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);

  /* Display the auto-display expressions.  */
  do_displays ();
}
6261
6262 /* Here to return control to GDB when the inferior stops for real.
6263 Print appropriate messages, remove breakpoints, give terminal our modes.
6264
6265 STOP_PRINT_FRAME nonzero means print the executing frame
6266 (pc, function, args, file, line number and line text).
6267 BREAKPOINTS_FAILED nonzero means stop was due to error
6268 attempting to insert breakpoints. */
6269
6270 void
6271 normal_stop (void)
6272 {
6273 struct target_waitstatus last;
6274 ptid_t last_ptid;
6275 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6276
6277 get_last_target_status (&last_ptid, &last);
6278
6279 /* If an exception is thrown from this point on, make sure to
6280 propagate GDB's knowledge of the executing state to the
6281 frontend/user running state. A QUIT is an easy exception to see
6282 here, so do this before any filtered output. */
6283 if (!non_stop)
6284 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6285 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6286 && last.kind != TARGET_WAITKIND_EXITED
6287 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6288 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6289
6290 /* As we're presenting a stop, and potentially removing breakpoints,
6291 update the thread list so we can tell whether there are threads
6292 running on the target. With target remote, for example, we can
6293 only learn about new threads when we explicitly update the thread
6294 list. Do this before notifying the interpreters about signal
6295 stops, end of stepping ranges, etc., so that the "new thread"
6296 output is emitted before e.g., "Program received signal FOO",
6297 instead of after. */
6298 update_thread_list ();
6299
6300 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
6301 observer_notify_signal_received (inferior_thread ()->suspend.stop_signal);
6302
6303 /* As with the notification of thread events, we want to delay
6304 notifying the user that we've switched thread context until
6305 the inferior actually stops.
6306
6307 There's no point in saying anything if the inferior has exited.
6308 Note that SIGNALLED here means "exited with a signal", not
6309 "received a signal".
6310
6311 Also skip saying anything in non-stop mode. In that mode, as we
6312 don't want GDB to switch threads behind the user's back, to avoid
6313 races where the user is typing a command to apply to thread x,
6314 but GDB switches to thread y before the user finishes entering
6315 the command, fetch_inferior_event installs a cleanup to restore
6316 the current thread back to the thread the user had selected right
6317 after this event is handled, so we're not really switching, only
6318 informing of a stop. */
6319 if (!non_stop
6320 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6321 && target_has_execution
6322 && last.kind != TARGET_WAITKIND_SIGNALLED
6323 && last.kind != TARGET_WAITKIND_EXITED
6324 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6325 {
6326 target_terminal_ours_for_output ();
6327 printf_filtered (_("[Switching to %s]\n"),
6328 target_pid_to_str (inferior_ptid));
6329 annotate_thread_changed ();
6330 previous_inferior_ptid = inferior_ptid;
6331 }
6332
6333 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6334 {
6335 gdb_assert (sync_execution || !target_can_async_p ());
6336
6337 target_terminal_ours_for_output ();
6338 printf_filtered (_("No unwaited-for children left.\n"));
6339 }
6340
6341 /* Note: this depends on the update_thread_list call above. */
6342 if (!breakpoints_should_be_inserted_now () && target_has_execution)
6343 {
6344 if (remove_breakpoints ())
6345 {
6346 target_terminal_ours_for_output ();
6347 printf_filtered (_("Cannot remove breakpoints because "
6348 "program is no longer writable.\nFurther "
6349 "execution is probably impossible.\n"));
6350 }
6351 }
6352
6353 /* If an auto-display called a function and that got a signal,
6354 delete that auto-display to avoid an infinite recursion. */
6355
6356 if (stopped_by_random_signal)
6357 disable_current_display ();
6358
6359 /* Notify observers if we finished a "step"-like command, etc. */
6360 if (target_has_execution
6361 && last.kind != TARGET_WAITKIND_SIGNALLED
6362 && last.kind != TARGET_WAITKIND_EXITED
6363 && inferior_thread ()->control.stop_step)
6364 {
6365 /* But not if in the middle of doing a "step n" operation for
6366 n > 1 */
6367 if (inferior_thread ()->step_multi)
6368 goto done;
6369
6370 observer_notify_end_stepping_range ();
6371 }
6372
6373 target_terminal_ours ();
6374 async_enable_stdin ();
6375
6376 /* Set the current source location. This will also happen if we
6377 display the frame below, but the current SAL will be incorrect
6378 during a user hook-stop function. */
6379 if (has_stack_frames () && !stop_stack_dummy)
6380 set_current_sal_from_frame (get_current_frame ());
6381
6382 /* Let the user/frontend see the threads as stopped, but do nothing
6383 if the thread was running an infcall. We may be e.g., evaluating
6384 a breakpoint condition. In that case, the thread had state
6385 THREAD_RUNNING before the infcall, and shall remain set to
6386 running, all without informing the user/frontend about state
6387 transition changes. If this is actually a call command, then the
6388 thread was originally already stopped, so there's no state to
6389 finish either. */
6390 if (target_has_execution && inferior_thread ()->control.in_infcall)
6391 discard_cleanups (old_chain);
6392 else
6393 do_cleanups (old_chain);
6394
6395 /* Look up the hook_stop and run it (CLI internally handles problem
6396 of stop_command's pre-hook not existing). */
6397 if (stop_command)
6398 catch_errors (hook_stop_stub, stop_command,
6399 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6400
6401 if (!has_stack_frames ())
6402 goto done;
6403
6404 if (last.kind == TARGET_WAITKIND_SIGNALLED
6405 || last.kind == TARGET_WAITKIND_EXITED)
6406 goto done;
6407
6408 /* Select innermost stack frame - i.e., current frame is frame 0,
6409 and current location is based on that.
6410 Don't do this on return from a stack dummy routine,
6411 or if the program has exited. */
6412
6413 if (!stop_stack_dummy)
6414 {
6415 select_frame (get_current_frame ());
6416
6417 /* If --batch-silent is enabled then there's no need to print the current
6418 source location, and to try risks causing an error message about
6419 missing source files. */
6420 if (stop_print_frame && !batch_silent)
6421 print_stop_event (&last);
6422 }
6423
6424 /* Save the function value return registers, if we care.
6425 We might be about to restore their previous contents. */
6426 if (inferior_thread ()->control.proceed_to_finish
6427 && execution_direction != EXEC_REVERSE)
6428 {
6429 /* This should not be necessary. */
6430 if (stop_registers)
6431 regcache_xfree (stop_registers);
6432
6433 /* NB: The copy goes through to the target picking up the value of
6434 all the registers. */
6435 stop_registers = regcache_dup (get_current_regcache ());
6436 }
6437
6438 if (stop_stack_dummy == STOP_STACK_DUMMY)
6439 {
6440 /* Pop the empty frame that contains the stack dummy.
6441 This also restores inferior state prior to the call
6442 (struct infcall_suspend_state). */
6443 struct frame_info *frame = get_current_frame ();
6444
6445 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6446 frame_pop (frame);
6447 /* frame_pop() calls reinit_frame_cache as the last thing it
6448 does which means there's currently no selected frame. We
6449 don't need to re-establish a selected frame if the dummy call
6450 returns normally, that will be done by
6451 restore_infcall_control_state. However, we do have to handle
6452 the case where the dummy call is returning after being
6453 stopped (e.g. the dummy call previously hit a breakpoint).
6454 We can't know which case we have so just always re-establish
6455 a selected frame here. */
6456 select_frame (get_current_frame ());
6457 }
6458
6459 done:
6460 annotate_stopped ();
6461
6462 /* Suppress the stop observer if we're in the middle of:
6463
6464 - a step n (n > 1), as there still more steps to be done.
6465
6466 - a "finish" command, as the observer will be called in
6467 finish_command_continuation, so it can include the inferior
6468 function's return value.
6469
6470 - calling an inferior function, as we pretend we inferior didn't
6471 run at all. The return value of the call is handled by the
6472 expression evaluator, through call_function_by_hand. */
6473
6474 if (!target_has_execution
6475 || last.kind == TARGET_WAITKIND_SIGNALLED
6476 || last.kind == TARGET_WAITKIND_EXITED
6477 || last.kind == TARGET_WAITKIND_NO_RESUMED
6478 || (!(inferior_thread ()->step_multi
6479 && inferior_thread ()->control.stop_step)
6480 && !(inferior_thread ()->control.stop_bpstat
6481 && inferior_thread ()->control.proceed_to_finish)
6482 && !inferior_thread ()->control.in_infcall))
6483 {
6484 if (!ptid_equal (inferior_ptid, null_ptid))
6485 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6486 stop_print_frame);
6487 else
6488 observer_notify_normal_stop (NULL, stop_print_frame);
6489 }
6490
6491 if (target_has_execution)
6492 {
6493 if (last.kind != TARGET_WAITKIND_SIGNALLED
6494 && last.kind != TARGET_WAITKIND_EXITED)
6495 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6496 Delete any breakpoint that is to be deleted at the next stop. */
6497 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6498 }
6499
6500 /* Try to get rid of automatically added inferiors that are no
6501 longer needed. Keeping those around slows down things linearly.
6502 Note that this never removes the current inferior. */
6503 prune_inferiors ();
6504 }
6505
/* catch_errors callback: run CMD, the pre-hook of the stop command.
   Always reports success to the caller.  */

static int
hook_stop_stub (void *cmd)
{
  struct cmd_list_element *hook = (struct cmd_list_element *) cmd;

  execute_cmd_pre_hook (hook);
  return 0;
}
6512 \f
6513 int
6514 signal_stop_state (int signo)
6515 {
6516 return signal_stop[signo];
6517 }
6518
6519 int
6520 signal_print_state (int signo)
6521 {
6522 return signal_print[signo];
6523 }
6524
6525 int
6526 signal_pass_state (int signo)
6527 {
6528 return signal_program[signo];
6529 }
6530
6531 static void
6532 signal_cache_update (int signo)
6533 {
6534 if (signo == -1)
6535 {
6536 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6537 signal_cache_update (signo);
6538
6539 return;
6540 }
6541
6542 signal_pass[signo] = (signal_stop[signo] == 0
6543 && signal_print[signo] == 0
6544 && signal_program[signo] == 1
6545 && signal_catch[signo] == 0);
6546 }
6547
6548 int
6549 signal_stop_update (int signo, int state)
6550 {
6551 int ret = signal_stop[signo];
6552
6553 signal_stop[signo] = state;
6554 signal_cache_update (signo);
6555 return ret;
6556 }
6557
6558 int
6559 signal_print_update (int signo, int state)
6560 {
6561 int ret = signal_print[signo];
6562
6563 signal_print[signo] = state;
6564 signal_cache_update (signo);
6565 return ret;
6566 }
6567
6568 int
6569 signal_pass_update (int signo, int state)
6570 {
6571 int ret = signal_program[signo];
6572
6573 signal_program[signo] = state;
6574 signal_cache_update (signo);
6575 return ret;
6576 }
6577
6578 /* Update the global 'signal_catch' from INFO and notify the
6579 target. */
6580
6581 void
6582 signal_catch_update (const unsigned int *info)
6583 {
6584 int i;
6585
6586 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6587 signal_catch[i] = info[i] > 0;
6588 signal_cache_update (-1);
6589 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6590 }
6591
/* Print the column headers for the table output by "info signals"
   and "handle".  */

static void
sig_print_header (void)
{
  printf_filtered (_("Signal Stop\tPrint\tPass "
		     "to program\tDescription\n"));
}
6598
6599 static void
6600 sig_print_info (enum gdb_signal oursig)
6601 {
6602 const char *name = gdb_signal_to_name (oursig);
6603 int name_padding = 13 - strlen (name);
6604
6605 if (name_padding <= 0)
6606 name_padding = 0;
6607
6608 printf_filtered ("%s", name);
6609 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6610 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6611 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6612 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6613 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6614 }
6615
6616 /* Specify how various signals in the inferior should be handled. */
6617
static void
handle_command (char *args, int from_tty)
{
  char **argv;
  int digits, wordlen;
  int sigfirst, signum, siglast;
  enum gdb_signal oursig;
  int allsigs;
  int nsigs;
  unsigned char *sigs;
  struct cleanup *old_chain;

  if (args == NULL)
    {
      error_no_arg (_("signal to handle"));
    }

  /* Allocate and zero an array of flags for which signals to handle. */

  nsigs = (int) GDB_SIGNAL_LAST;
  sigs = (unsigned char *) alloca (nsigs);
  memset (sigs, 0, nsigs);

  /* Break the command line up into args. */

  argv = gdb_buildargv (args);
  old_chain = make_cleanup_freeargv (argv);

  /* Walk through the args, looking for signal oursigs, signal names, and
     actions.  Signal numbers and signal names may be interspersed with
     actions, with the actions being performed for all signals cumulatively
     specified.  Signal ranges can be specified as <LOW>-<HIGH>. */

  while (*argv != NULL)
    {
      /* Count leading digits so numeric arguments and ranges can be
	 recognized below.  */
      wordlen = strlen (*argv);
      for (digits = 0; isdigit ((*argv)[digits]); digits++)
	{;
	}
      allsigs = 0;
      sigfirst = siglast = -1;

      /* Action keywords are matched by prefix: the minimum length
	 compared against WORDLEN is just enough to disambiguate each
	 keyword from the others.  */
      if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
	{
	  /* Apply action to all signals except those used by the
	     debugger.  Silently skip those. */
	  allsigs = 1;
	  sigfirst = 0;
	  siglast = nsigs - 1;
	}
      else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
	{
	  /* "stop" implies "print".  */
	  SET_SIGS (nsigs, sigs, signal_stop);
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
	{
	  /* "noignore" is a synonym for "pass".  */
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
	{
	  /* "noprint" implies "nostop".  */
	  UNSET_SIGS (nsigs, sigs, signal_print);
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (digits > 0)
	{
	  /* It is numeric.  The numeric signal refers to our own
	     internal signal numbering from target.h, not to host/target
	     signal number.  This is a feature; users really should be
	     using symbolic names anyway, and the common ones like
	     SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */

	  sigfirst = siglast = (int)
	    gdb_signal_from_command (atoi (*argv));
	  if ((*argv)[digits] == '-')
	    {
	      /* A range LOW-HIGH was given.  */
	      siglast = (int)
		gdb_signal_from_command (atoi ((*argv) + digits + 1));
	    }
	  if (sigfirst > siglast)
	    {
	      /* Bet he didn't figure we'd think of this case... */
	      signum = sigfirst;
	      sigfirst = siglast;
	      siglast = signum;
	    }
	}
      else
	{
	  oursig = gdb_signal_from_name (*argv);
	  if (oursig != GDB_SIGNAL_UNKNOWN)
	    {
	      sigfirst = siglast = (int) oursig;
	    }
	  else
	    {
	      /* Not a number and not a recognized flag word => complain.  */
	      error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
	    }
	}

      /* If any signal numbers or symbol names were found, set flags for
	 which signals to apply actions to. */

      for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
	{
	  switch ((enum gdb_signal) signum)
	    {
	    case GDB_SIGNAL_TRAP:
	    case GDB_SIGNAL_INT:
	      /* Changing these interferes with GDB's own use of the
		 signals; require explicit confirmation unless the user
		 already confirmed or said "all".  */
	      if (!allsigs && !sigs[signum])
		{
		  if (query (_("%s is used by the debugger.\n\
Are you sure you want to change it? "),
			     gdb_signal_to_name ((enum gdb_signal) signum)))
		    {
		      sigs[signum] = 1;
		    }
		  else
		    {
		      printf_unfiltered (_("Not confirmed, unchanged.\n"));
		      gdb_flush (gdb_stdout);
		    }
		}
	      break;
	    case GDB_SIGNAL_0:
	    case GDB_SIGNAL_DEFAULT:
	    case GDB_SIGNAL_UNKNOWN:
	      /* Make sure that "all" doesn't print these. */
	      break;
	    default:
	      sigs[signum] = 1;
	      break;
	    }
	}

      argv++;
    }

  /* If at least one signal was affected, refresh the cache, push the
     new settings to the target, and optionally display the result.  */
  for (signum = 0; signum < nsigs; signum++)
    if (sigs[signum])
      {
	signal_cache_update (-1);
	target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
	target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);

	if (from_tty)
	  {
	    /* Show the results. */
	    sig_print_header ();
	    for (; signum < nsigs; signum++)
	      if (sigs[signum])
		sig_print_info (signum);
	  }

	break;
      }

  do_cleanups (old_chain);
}
6798
6799 /* Complete the "handle" command. */
6800
6801 static VEC (char_ptr) *
6802 handle_completer (struct cmd_list_element *ignore,
6803 const char *text, const char *word)
6804 {
6805 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6806 static const char * const keywords[] =
6807 {
6808 "all",
6809 "stop",
6810 "ignore",
6811 "print",
6812 "pass",
6813 "nostop",
6814 "noignore",
6815 "noprint",
6816 "nopass",
6817 NULL,
6818 };
6819
6820 vec_signals = signal_completer (ignore, text, word);
6821 vec_keywords = complete_on_enum (keywords, word, word);
6822
6823 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6824 VEC_free (char_ptr, vec_signals);
6825 VEC_free (char_ptr, vec_keywords);
6826 return return_val;
6827 }
6828
6829 static void
6830 xdb_handle_command (char *args, int from_tty)
6831 {
6832 char **argv;
6833 struct cleanup *old_chain;
6834
6835 if (args == NULL)
6836 error_no_arg (_("xdb command"));
6837
6838 /* Break the command line up into args. */
6839
6840 argv = gdb_buildargv (args);
6841 old_chain = make_cleanup_freeargv (argv);
6842 if (argv[1] != (char *) NULL)
6843 {
6844 char *argBuf;
6845 int bufLen;
6846
6847 bufLen = strlen (argv[0]) + 20;
6848 argBuf = (char *) xmalloc (bufLen);
6849 if (argBuf)
6850 {
6851 int validFlag = 1;
6852 enum gdb_signal oursig;
6853
6854 oursig = gdb_signal_from_name (argv[0]);
6855 memset (argBuf, 0, bufLen);
6856 if (strcmp (argv[1], "Q") == 0)
6857 sprintf (argBuf, "%s %s", argv[0], "noprint");
6858 else
6859 {
6860 if (strcmp (argv[1], "s") == 0)
6861 {
6862 if (!signal_stop[oursig])
6863 sprintf (argBuf, "%s %s", argv[0], "stop");
6864 else
6865 sprintf (argBuf, "%s %s", argv[0], "nostop");
6866 }
6867 else if (strcmp (argv[1], "i") == 0)
6868 {
6869 if (!signal_program[oursig])
6870 sprintf (argBuf, "%s %s", argv[0], "pass");
6871 else
6872 sprintf (argBuf, "%s %s", argv[0], "nopass");
6873 }
6874 else if (strcmp (argv[1], "r") == 0)
6875 {
6876 if (!signal_print[oursig])
6877 sprintf (argBuf, "%s %s", argv[0], "print");
6878 else
6879 sprintf (argBuf, "%s %s", argv[0], "noprint");
6880 }
6881 else
6882 validFlag = 0;
6883 }
6884 if (validFlag)
6885 handle_command (argBuf, from_tty);
6886 else
6887 printf_filtered (_("Invalid signal handling flag.\n"));
6888 if (argBuf)
6889 xfree (argBuf);
6890 }
6891 }
6892 do_cleanups (old_chain);
6893 }
6894
6895 enum gdb_signal
6896 gdb_signal_from_command (int num)
6897 {
6898 if (num >= 1 && num <= 15)
6899 return (enum gdb_signal) num;
6900 error (_("Only signals 1-15 are valid as numeric signals.\n\
6901 Use \"info signals\" for a list of symbolic signals."));
6902 }
6903
6904 /* Print current contents of the tables set by the handle command.
6905 It is possible we should just be printing signals actually used
6906 by the current target (but for things to work right when switching
6907 targets, all signals should be in the signal tables). */
6908
6909 static void
6910 signals_info (char *signum_exp, int from_tty)
6911 {
6912 enum gdb_signal oursig;
6913
6914 sig_print_header ();
6915
6916 if (signum_exp)
6917 {
6918 /* First see if this is a symbol name. */
6919 oursig = gdb_signal_from_name (signum_exp);
6920 if (oursig == GDB_SIGNAL_UNKNOWN)
6921 {
6922 /* No, try numeric. */
6923 oursig =
6924 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6925 }
6926 sig_print_info (oursig);
6927 return;
6928 }
6929
6930 printf_filtered ("\n");
6931 /* These ugly casts brought to you by the native VAX compiler. */
6932 for (oursig = GDB_SIGNAL_FIRST;
6933 (int) oursig < (int) GDB_SIGNAL_LAST;
6934 oursig = (enum gdb_signal) ((int) oursig + 1))
6935 {
6936 QUIT;
6937
6938 if (oursig != GDB_SIGNAL_UNKNOWN
6939 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6940 sig_print_info (oursig);
6941 }
6942
6943 printf_filtered (_("\nUse the \"handle\" command "
6944 "to change these tables.\n"));
6945 }
6946
6947 /* Check if it makes sense to read $_siginfo from the current thread
6948 at this point. If not, throw an error. */
6949
6950 static void
6951 validate_siginfo_access (void)
6952 {
6953 /* No current inferior, no siginfo. */
6954 if (ptid_equal (inferior_ptid, null_ptid))
6955 error (_("No thread selected."));
6956
6957 /* Don't try to read from a dead thread. */
6958 if (is_exited (inferior_ptid))
6959 error (_("The current thread has terminated"));
6960
6961 /* ... or from a spinning thread. */
6962 if (is_running (inferior_ptid))
6963 error (_("Selected thread is running."));
6964 }
6965
6966 /* The $_siginfo convenience variable is a bit special. We don't know
6967 for sure the type of the value until we actually have a chance to
6968 fetch the data. The type can change depending on gdbarch, so it is
6969 also dependent on which thread you have selected.
6970
6971 1. making $_siginfo be an internalvar that creates a new value on
6972 access.
6973
6974 2. making the value of $_siginfo be an lval_computed value. */
6975
6976 /* This function implements the lval_computed support for reading a
6977 $_siginfo value. */
6978
6979 static void
6980 siginfo_value_read (struct value *v)
6981 {
6982 LONGEST transferred;
6983
6984 validate_siginfo_access ();
6985
6986 transferred =
6987 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6988 NULL,
6989 value_contents_all_raw (v),
6990 value_offset (v),
6991 TYPE_LENGTH (value_type (v)));
6992
6993 if (transferred != TYPE_LENGTH (value_type (v)))
6994 error (_("Unable to read siginfo"));
6995 }
6996
6997 /* This function implements the lval_computed support for writing a
6998 $_siginfo value. */
6999
7000 static void
7001 siginfo_value_write (struct value *v, struct value *fromval)
7002 {
7003 LONGEST transferred;
7004
7005 validate_siginfo_access ();
7006
7007 transferred = target_write (&current_target,
7008 TARGET_OBJECT_SIGNAL_INFO,
7009 NULL,
7010 value_contents_all_raw (fromval),
7011 value_offset (v),
7012 TYPE_LENGTH (value_type (fromval)));
7013
7014 if (transferred != TYPE_LENGTH (value_type (fromval)))
7015 error (_("Unable to write siginfo"));
7016 }
7017
/* The lval_computed read/write hooks backing the $_siginfo value.  */
static const struct lval_funcs siginfo_value_funcs =
  {
    siginfo_value_read,
    siginfo_value_write
  };
7023
7024 /* Return a new value with the correct type for the siginfo object of
7025 the current thread using architecture GDBARCH. Return a void value
7026 if there's no object available. */
7027
7028 static struct value *
7029 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
7030 void *ignore)
7031 {
7032 if (target_has_stack
7033 && !ptid_equal (inferior_ptid, null_ptid)
7034 && gdbarch_get_siginfo_type_p (gdbarch))
7035 {
7036 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7037
7038 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
7039 }
7040
7041 return allocate_value (builtin_type (gdbarch)->builtin_void);
7042 }
7043
7044 \f
7045 /* infcall_suspend_state contains state about the program itself like its
7046 registers and any signal it received when it last stopped.
7047 This state must be restored regardless of how the inferior function call
7048 ends (either successfully, or after it hits a breakpoint or signal)
7049 if the program is to properly continue where it left off. */
7050
struct infcall_suspend_state
{
  /* Suspend-related state (signal, stop reason) of the thread the
     call ran on.  */
  struct thread_suspend_state thread_suspend;
#if 0 /* Currently unused and empty structures are not valid C.  */
  struct inferior_suspend_state inferior_suspend;
#endif

  /* Other fields:  */
  /* PC at which the inferior had stopped before the call.  */
  CORE_ADDR stop_pc;
  /* Copy of the register state taken at save time.  */
  struct regcache *registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *siginfo_gdbarch;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For different gdbarch the
     content would be invalid.  */
  gdb_byte *siginfo_data;
};
7070
/* Save the inferior's execution state (signal, stop PC, registers and,
   if supported, siginfo) so it can be restored after an inferior
   function call.  The caller owns the returned state and must dispose
   of it with restore_infcall_suspend_state or
   discard_infcall_suspend_state.  */

struct infcall_suspend_state *
save_infcall_suspend_state (void)
{
  struct infcall_suspend_state *inf_state;
  struct thread_info *tp = inferior_thread ();
#if 0
  struct inferior *inf = current_inferior ();
#endif
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  gdb_byte *siginfo_data = NULL;

  if (gdbarch_get_siginfo_type_p (gdbarch))
    {
      struct type *type = gdbarch_get_siginfo_type (gdbarch);
      size_t len = TYPE_LENGTH (type);
      struct cleanup *back_to;

      siginfo_data = xmalloc (len);
      back_to = make_cleanup (xfree, siginfo_data);

      /* Keep the buffer only if the full siginfo could be read.  */
      if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
		       siginfo_data, 0, len) == len)
	discard_cleanups (back_to);
      else
	{
	  /* Errors ignored.  */
	  do_cleanups (back_to);
	  siginfo_data = NULL;
	}
    }

  inf_state = XCNEW (struct infcall_suspend_state);

  if (siginfo_data)
    {
      /* Record the gdbarch so a restore against a different
	 architecture can be detected and skipped.  */
      inf_state->siginfo_gdbarch = gdbarch;
      inf_state->siginfo_data = siginfo_data;
    }

  inf_state->thread_suspend = tp->suspend;
#if 0 /* Currently unused and empty structures are not valid C.  */
  inf_state->inferior_suspend = inf->suspend;
#endif

  /* run_inferior_call will not use the signal due to its `proceed' call with
     GDB_SIGNAL_0 anyway.  */
  tp->suspend.stop_signal = GDB_SIGNAL_0;

  inf_state->stop_pc = stop_pc;

  /* NB: regcache_dup reads the full register state through to the
     target.  */
  inf_state->registers = regcache_dup (regcache);

  return inf_state;
}
7126
7127 /* Restore inferior session state to INF_STATE. */
7128
void
restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  struct thread_info *tp = inferior_thread ();
#if 0
  struct inferior *inf = current_inferior ();
#endif
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  tp->suspend = inf_state->thread_suspend;
#if 0 /* Currently unused and empty structures are not valid C.  */
  inf->suspend = inf_state->inferior_suspend;
#endif

  stop_pc = inf_state->stop_pc;

  /* Only write saved siginfo back if it was captured against the same
     architecture; for a different gdbarch the saved bytes would be
     meaningless (see infcall_suspend_state).  */
  if (inf_state->siginfo_gdbarch == gdbarch)
    {
      struct type *type = gdbarch_get_siginfo_type (gdbarch);

      /* Errors ignored.  */
      target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
		    inf_state->siginfo_data, 0, TYPE_LENGTH (type));
    }

  /* The inferior can be gone if the user types "print exit(0)"
     (and perhaps other times).  */
  if (target_has_execution)
    /* NB: The register write goes through to the target.  */
    regcache_cpy (regcache, inf_state->registers);

  /* INF_STATE is consumed: free it and everything it owns.  */
  discard_infcall_suspend_state (inf_state);
}
7163
/* Cleanup wrapper around restore_infcall_suspend_state.  */

static void
do_restore_infcall_suspend_state_cleanup (void *state)
{
  restore_infcall_suspend_state ((struct infcall_suspend_state *) state);
}
7169
7170 struct cleanup *
7171 make_cleanup_restore_infcall_suspend_state
7172 (struct infcall_suspend_state *inf_state)
7173 {
7174 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
7175 }
7176
7177 void
7178 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7179 {
7180 regcache_xfree (inf_state->registers);
7181 xfree (inf_state->siginfo_data);
7182 xfree (inf_state);
7183 }
7184
7185 struct regcache *
7186 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
7187 {
7188 return inf_state->registers;
7189 }
7190
7191 /* infcall_control_state contains state regarding gdb's control of the
7192 inferior itself like stepping control. It also contains session state like
7193 the user's currently selected frame. */
7194
struct infcall_control_state
{
  /* Stepping/breakpoint control state of the thread that ran the
     call.  */
  struct thread_control_state thread_control;
  /* Control state of the inferior itself.  */
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy;
  int stopped_by_random_signal;
  int stop_after_trap;

  /* ID of the selected frame when the inferior function call was made.  */
  struct frame_id selected_frame_id;
};
7208
7209 /* Save all of the information associated with the inferior<==>gdb
7210 connection. */
7211
struct infcall_control_state *
save_infcall_control_state (void)
{
  struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  inf_status->thread_control = tp->control;
  inf_status->inferior_control = inf->control;

  /* Ownership of the step/exception resume breakpoints moved to
     INF_STATUS above; clear the thread's pointers so they are not
     also reachable from TP.  */
  tp->control.step_resume_breakpoint = NULL;
  tp->control.exception_resume_breakpoint = NULL;

  /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
     chain.  If caller's caller is walking the chain, they'll be happier if we
     hand them back the original chain when restore_infcall_control_state is
     called.  */
  tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);

  /* Other fields:  */
  inf_status->stop_stack_dummy = stop_stack_dummy;
  inf_status->stopped_by_random_signal = stopped_by_random_signal;
  inf_status->stop_after_trap = stop_after_trap;

  /* Remember the selected frame so it can be re-selected on restore.  */
  inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));

  return inf_status;
}
7240
7241 static int
7242 restore_selected_frame (void *args)
7243 {
7244 struct frame_id *fid = (struct frame_id *) args;
7245 struct frame_info *frame;
7246
7247 frame = frame_find_by_id (*fid);
7248
7249 /* If inf_status->selected_frame_id is NULL, there was no previously
7250 selected frame. */
7251 if (frame == NULL)
7252 {
7253 warning (_("Unable to restore previously selected frame."));
7254 return 0;
7255 }
7256
7257 select_frame (frame);
7258
7259 return (1);
7260 }
7261
7262 /* Restore inferior session state to INF_STATUS. */
7263
void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* The thread's current resume breakpoints are about to be replaced
     by the saved ones; mark them for deletion at the next stop.  */
  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;
  stop_after_trap = inf_status->stop_after_trap;

  if (target_has_stack)
    {
      /* The point of catch_errors is that if the stack is clobbered,
	 walking the stack might encounter a garbage pointer and
	 error() trying to dereference it.  */
      if (catch_errors
	  (restore_selected_frame, &inf_status->selected_frame_id,
	   "Unable to restore previously selected frame:\n",
	   RETURN_MASK_ERROR) == 0)
	/* Error in restoring the selected frame.  Select the innermost
	   frame.  */
	select_frame (get_current_frame ());
    }

  xfree (inf_status);
}
7304
/* Cleanup wrapper around restore_infcall_control_state.  */

static void
do_restore_infcall_control_state_cleanup (void *sts)
{
  restore_infcall_control_state ((struct infcall_control_state *) sts);
}
7310
7311 struct cleanup *
7312 make_cleanup_restore_infcall_control_state
7313 (struct infcall_control_state *inf_status)
7314 {
7315 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7316 }
7317
7318 void
7319 discard_infcall_control_state (struct infcall_control_state *inf_status)
7320 {
7321 if (inf_status->thread_control.step_resume_breakpoint)
7322 inf_status->thread_control.step_resume_breakpoint->disposition
7323 = disp_del_at_next_stop;
7324
7325 if (inf_status->thread_control.exception_resume_breakpoint)
7326 inf_status->thread_control.exception_resume_breakpoint->disposition
7327 = disp_del_at_next_stop;
7328
7329 /* See save_infcall_control_state for info on stop_bpstat. */
7330 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7331
7332 xfree (inf_status);
7333 }
7334 \f
7335 /* restore_inferior_ptid() will be used by the cleanup machinery
7336 to restore the inferior_ptid value saved in a call to
7337 save_inferior_ptid(). */
7338
7339 static void
7340 restore_inferior_ptid (void *arg)
7341 {
7342 ptid_t *saved_ptid_ptr = arg;
7343
7344 inferior_ptid = *saved_ptid_ptr;
7345 xfree (arg);
7346 }
7347
7348 /* Save the value of inferior_ptid so that it may be restored by a
7349 later call to do_cleanups(). Returns the struct cleanup pointer
7350 needed for later doing the cleanup. */
7351
7352 struct cleanup *
7353 save_inferior_ptid (void)
7354 {
7355 ptid_t *saved_ptid_ptr;
7356
7357 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7358 *saved_ptid_ptr = inferior_ptid;
7359 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7360 }
7361
7362 /* See infrun.h. */
7363
/* See infrun.h.  */

void
clear_exit_convenience_vars (void)
{
  /* Forget the values recorded from the previous inferior exit.  */
  clear_internalvar (lookup_internalvar ("_exitcode"));
  clear_internalvar (lookup_internalvar ("_exitsignal"));
}
7370 \f
7371
7372 /* User interface for reverse debugging:
7373 Set exec-direction / show exec-direction commands
7374 (returns error unless target implements to_set_exec_direction method). */
7375
/* Direction in which the target will be resumed; controlled by the
   "set exec-direction" command.  */
int execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
/* User-visible string value of "set exec-direction"; always one of
   the two constants above.  */
static const char *exec_direction = exec_forward;
/* Valid values for the "set exec-direction" enum command.  */
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  NULL
};
7385
7386 static void
7387 set_exec_direction_func (char *args, int from_tty,
7388 struct cmd_list_element *cmd)
7389 {
7390 if (target_can_execute_reverse)
7391 {
7392 if (!strcmp (exec_direction, exec_forward))
7393 execution_direction = EXEC_FORWARD;
7394 else if (!strcmp (exec_direction, exec_reverse))
7395 execution_direction = EXEC_REVERSE;
7396 }
7397 else
7398 {
7399 exec_direction = exec_forward;
7400 error (_("Target does not support this operation."));
7401 }
7402 }
7403
7404 static void
7405 show_exec_direction_func (struct ui_file *out, int from_tty,
7406 struct cmd_list_element *cmd, const char *value)
7407 {
7408 switch (execution_direction) {
7409 case EXEC_FORWARD:
7410 fprintf_filtered (out, _("Forward.\n"));
7411 break;
7412 case EXEC_REVERSE:
7413 fprintf_filtered (out, _("Reverse.\n"));
7414 break;
7415 default:
7416 internal_error (__FILE__, __LINE__,
7417 _("bogus execution_direction value: %d"),
7418 (int) execution_direction);
7419 }
7420 }
7421
/* "show schedule-multiple" command callback: report whether resuming
   applies to threads of all processes or only the current one.  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Resuming the execution of threads "
			    "of all processes is %s.\n"), value);
}
7429
7430 /* Implementation of `siginfo' variable. */
7431
/* Hooks for the $_siginfo internal variable; the value is created
   lazily by siginfo_make_value on each access.  */
static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  NULL,
  NULL
};
7438
/* Module initialization: register infrun's commands and "set/show"
   variables, allocate and seed the per-signal handling tables, and
   attach the observers that keep infrun's state in sync with thread
   and inferior lifetime events.  Called once at GDB startup.  */

void
_initialize_infrun (void)
{
  int i;
  int numsigs;
  struct cmd_list_element *c;

  /* "info signals" / "info handle": display signal disposition.  */
  add_info ("signals", signals_info, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", "signals", 0);

  /* The "handle" command, with its signal-name completer.  */
  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  /* XDB compatibility commands ("lz" and "z").  */
  if (xdb_commands)
    {
      add_com ("lz", class_info, signals_info, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
      add_com ("z", class_run, xdb_handle_command, _("\
Specify how to handle a signal.\n\
Args are signals and actions to apply to those signals.\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
Recognized actions include \"s\" (toggles between stop and nostop),\n\
\"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
nopass), \"Q\" (noprint)\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined."));
    }

  /* In DBX mode "stop" is a real command; otherwise register a
     placeholder so that users can define a `hook-stop'.  */
  if (!dbx_commands)
    stop_command = add_cmd ("stop", class_obscure,
			    not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  /* Debug knobs: "set debug infrun" and "set debug displaced".  */
  add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
Set inferior debugging."), _("\
Show inferior debugging."), _("\
When non-zero, inferior specific debugging is enabled."),
			     NULL,
			     show_debug_infrun,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("displaced", class_maintenance,
			   &debug_displaced, _("\
Set displaced stepping debugging."), _("\
Show displaced stepping debugging."), _("\
When non-zero, displaced stepping specific debugging is enabled."),
			    NULL,
			    show_debug_displaced,
			    &setdebuglist, &showdebuglist);

  /* "set non-stop" — note the set callback writes through non_stop_1
     so the mode only takes effect when allowed (see set_non_stop).  */
  add_setshow_boolean_cmd ("non-stop", no_class,
			   &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
			   set_non_stop,
			   show_non_stop,
			   &setlist,
			   &showlist);

  /* Allocate the per-signal disposition tables, one slot per GDB
     signal number, and default every signal to stop/print/pass.
     signal_pass is not filled here; signal_cache_update below derives
     it from the other tables.  */
  numsigs = (int) GDB_SIGNAL_LAST;
  signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
  signal_print = (unsigned char *)
    xmalloc (sizeof (signal_print[0]) * numsigs);
  signal_program = (unsigned char *)
    xmalloc (sizeof (signal_program[0]) * numsigs);
  signal_catch = (unsigned char *)
    xmalloc (sizeof (signal_catch[0]) * numsigs);
  signal_pass = (unsigned char *)
    xmalloc (sizeof (signal_pass[0]) * numsigs);
  for (i = 0; i < numsigs; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions
     should not be given to the program afterwards.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			NULL,
			show_follow_fork_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			NULL,
			show_follow_exec_mode_string,
			&setlist, &showlist);

  /* The set callback traps on the target vector (see
     set_schedlock_func).  */
  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off  == no locking (threads may preempt at any time)\n\
on   == full locking (no thread except the current thread may run)\n\
step == scheduler locked during every single-step operation.\n\
	In this mode, no other thread may run during a step command.\n\
	Other threads may run while stepping over a function call ('next')."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   NULL,
			   show_schedule_multiple,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   NULL,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				NULL,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   NULL, NULL, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  /* Keep infrun's per-thread state consistent with thread/inferior
     lifetime events.  */
  observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
  observer_attach_thread_stop_requested (infrun_thread_stop_requested);
  observer_attach_thread_exit (infrun_thread_thread_exit);
  observer_attach_inferior_exit (infrun_inferior_exit);

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);

  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);
}
This page took 0.279529 seconds and 5 git commands to generate.