Make single-step breakpoints be per-thread
binutils-gdb.git: gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "breakpoint.h"
28 #include "gdb_wait.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "cli/cli-script.h"
32 #include "target.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include <signal.h>
38 #include "inf-loop.h"
39 #include "regcache.h"
40 #include "value.h"
41 #include "observer.h"
42 #include "language.h"
43 #include "solib.h"
44 #include "main.h"
45 #include "dictionary.h"
46 #include "block.h"
47 #include "mi/mi-common.h"
48 #include "event-top.h"
49 #include "record.h"
50 #include "record-full.h"
51 #include "inline-frame.h"
52 #include "jit.h"
53 #include "tracepoint.h"
54 #include "continuations.h"
55 #include "interps.h"
56 #include "skip.h"
57 #include "probe.h"
58 #include "objfiles.h"
59 #include "completer.h"
60 #include "target-descriptions.h"
61 #include "target-dcache.h"
62 #include "terminal.h"
63
64 /* Prototypes for local functions */
65
66 static void signals_info (char *, int);
67
68 static void handle_command (char *, int);
69
70 static void sig_print_info (enum gdb_signal);
71
72 static void sig_print_header (void);
73
74 static void resume_cleanups (void *);
75
76 static int hook_stop_stub (void *);
77
78 static int restore_selected_frame (void *);
79
80 static int follow_fork (void);
81
82 static int follow_fork_inferior (int follow_child, int detach_fork);
83
84 static void follow_inferior_reset_breakpoints (void);
85
86 static void set_schedlock_func (char *args, int from_tty,
87 struct cmd_list_element *c);
88
89 static int currently_stepping (struct thread_info *tp);
90
91 static void xdb_handle_command (char *args, int from_tty);
92
93 void _initialize_infrun (void);
94
95 void nullify_last_target_wait_ptid (void);
96
97 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
98
99 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
100
101 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
102
103 /* When set, stop the 'step' command if we enter a function which has
104 no line number information. The normal behavior is that we step
105    over such functions.  */
106 int step_stop_if_no_debug = 0;
107 static void
108 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
109 struct cmd_list_element *c, const char *value)
110 {
111 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
112 }
113
114 /* In asynchronous mode, but simulating synchronous execution. */
115
116 int sync_execution = 0;
117
118 /* proceed and normal_stop use this to notify the user when the
119 inferior stopped in a different thread than it had been running
120 in. */
121
122 static ptid_t previous_inferior_ptid;
123
124 /* If set (default for legacy reasons), when following a fork, GDB
125 will detach from one of the fork branches, child or parent.
126 Exactly which branch is detached depends on 'set follow-fork-mode'
127 setting. */
128
129 static int detach_fork = 1;
130
131 int debug_displaced = 0;
132 static void
133 show_debug_displaced (struct ui_file *file, int from_tty,
134 struct cmd_list_element *c, const char *value)
135 {
136 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
137 }
138
139 unsigned int debug_infrun = 0;
140 static void
141 show_debug_infrun (struct ui_file *file, int from_tty,
142 struct cmd_list_element *c, const char *value)
143 {
144 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
145 }
146
147
148 /* Support for disabling address space randomization. */
149
150 int disable_randomization = 1;
151
152 static void
153 show_disable_randomization (struct ui_file *file, int from_tty,
154 struct cmd_list_element *c, const char *value)
155 {
156 if (target_supports_disable_randomization ())
157 fprintf_filtered (file,
158 _("Disabling randomization of debuggee's "
159 "virtual address space is %s.\n"),
160 value);
161 else
162 fputs_filtered (_("Disabling randomization of debuggee's "
163 "virtual address space is unsupported on\n"
164 "this platform.\n"), file);
165 }
166
167 static void
168 set_disable_randomization (char *args, int from_tty,
169 struct cmd_list_element *c)
170 {
171 if (!target_supports_disable_randomization ())
172 error (_("Disabling randomization of debuggee's "
173 "virtual address space is unsupported on\n"
174 "this platform."));
175 }
176
177 /* User interface for non-stop mode. */
178
179 int non_stop = 0;
180 static int non_stop_1 = 0;
181
182 static void
183 set_non_stop (char *args, int from_tty,
184 struct cmd_list_element *c)
185 {
186 if (target_has_execution)
187 {
188 non_stop_1 = non_stop;
189 error (_("Cannot change this setting while the inferior is running."));
190 }
191
192 non_stop = non_stop_1;
193 }
194
195 static void
196 show_non_stop (struct ui_file *file, int from_tty,
197 struct cmd_list_element *c, const char *value)
198 {
199 fprintf_filtered (file,
200 _("Controlling the inferior in non-stop mode is %s.\n"),
201 value);
202 }
203
204 /* "Observer mode" is somewhat like a more extreme version of
205 non-stop, in which all GDB operations that might affect the
206 target's execution have been disabled. */
207
208 int observer_mode = 0;
209 static int observer_mode_1 = 0;
210
211 static void
212 set_observer_mode (char *args, int from_tty,
213 struct cmd_list_element *c)
214 {
215 if (target_has_execution)
216 {
217 observer_mode_1 = observer_mode;
218 error (_("Cannot change this setting while the inferior is running."));
219 }
220
221 observer_mode = observer_mode_1;
222
223 may_write_registers = !observer_mode;
224 may_write_memory = !observer_mode;
225 may_insert_breakpoints = !observer_mode;
226 may_insert_tracepoints = !observer_mode;
227 /* We can insert fast tracepoints in or out of observer mode,
228 but enable them if we're going into this mode. */
229 if (observer_mode)
230 may_insert_fast_tracepoints = 1;
231 may_stop = !observer_mode;
232 update_target_permissions ();
233
234 /* Going *into* observer mode we must force non-stop, then
235 going out we leave it that way. */
236 if (observer_mode)
237 {
238 pagination_enabled = 0;
239 non_stop = non_stop_1 = 1;
240 }
241
242 if (from_tty)
243 printf_filtered (_("Observer mode is now %s.\n"),
244 (observer_mode ? "on" : "off"));
245 }
246
247 static void
248 show_observer_mode (struct ui_file *file, int from_tty,
249 struct cmd_list_element *c, const char *value)
250 {
251 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
252 }
253
254 /* This updates the value of observer mode based on changes in
255 permissions. Note that we are deliberately ignoring the values of
256 may-write-registers and may-write-memory, since the user may have
257 reason to enable these during a session, for instance to turn on a
258 debugging-related global. */
259
260 void
261 update_observer_mode (void)
262 {
263 int newval;
264
265 newval = (!may_insert_breakpoints
266 && !may_insert_tracepoints
267 && may_insert_fast_tracepoints
268 && !may_stop
269 && non_stop);
270
271 /* Let the user know if things change. */
272 if (newval != observer_mode)
273 printf_filtered (_("Observer mode is now %s.\n"),
274 (newval ? "on" : "off"));
275
276 observer_mode = observer_mode_1 = newval;
277 }
278
279 /* Tables of how to react to signals; the user sets them. */
280
281 static unsigned char *signal_stop;
282 static unsigned char *signal_print;
283 static unsigned char *signal_program;
284
285 /* Table of signals that are registered with "catch signal". A
286 non-zero entry indicates that the signal is caught by some "catch
287 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
288 signals. */
289 static unsigned char *signal_catch;
290
291 /* Table of signals that the target may silently handle.
292 This is automatically determined from the flags above,
293 and simply cached here. */
294 static unsigned char *signal_pass;
295
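/* Illustrative sketch (an assumption, not part of the original file):
   how the cached signal_pass table relates to the user-visible tables
   above.  A signal may be silently handled by the target only if it
   neither stops nor prints, is programmed to be passed, and is not
   caught by a "catch signal" catchpoint; the real recomputation is
   done when those tables change (see signal_cache_update).  */

static void
example_recompute_signal_pass (void)
{
  int signo;

  for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
    signal_pass[signo] = (signal_stop[signo] == 0
			  && signal_print[signo] == 0
			  && signal_program[signo] == 1
			  && signal_catch[signo] == 0);
}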
296 #define SET_SIGS(nsigs,sigs,flags) \
297 do { \
298 int signum = (nsigs); \
299 while (signum-- > 0) \
300 if ((sigs)[signum]) \
301 (flags)[signum] = 1; \
302 } while (0)
303
304 #define UNSET_SIGS(nsigs,sigs,flags) \
305 do { \
306 int signum = (nsigs); \
307 while (signum-- > 0) \
308 if ((sigs)[signum]) \
309 (flags)[signum] = 0; \
310 } while (0)
311
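/* Illustrative sketch (an assumption, not original code): how a caller
   such as handle_command might drive the SET_SIGS/UNSET_SIGS helpers
   above.  SIGS is assumed to be a caller-built mask with one entry per
   signal, non-zero for each signal selected on the command line.  */

static void
example_handle_stop_print (int nsigs, const unsigned char *sigs)
{
  /* Marking the selected signals to stop also marks them to print,
     in both of the user-visible tables defined above.  */
  SET_SIGS (nsigs, sigs, signal_stop);
  SET_SIGS (nsigs, sigs, signal_print);
}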
312 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
313 this function is to avoid exporting `signal_program'. */
314
315 void
316 update_signals_program_target (void)
317 {
318 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
319 }
320
321 /* Value to pass to target_resume() to cause all threads to resume. */
322
323 #define RESUME_ALL minus_one_ptid
324
325 /* Command list pointer for the "stop" placeholder. */
326
327 static struct cmd_list_element *stop_command;
328
329 /* Function inferior was in as of last step command. */
330
331 static struct symbol *step_start_function;
332
333 /* Nonzero if we want to give control to the user when we're notified
334 of shared library events by the dynamic linker. */
335 int stop_on_solib_events;
336
337 /* Enable or disable optional shared library event breakpoints
338 as appropriate when the above flag is changed. */
339
340 static void
341 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
342 {
343 update_solib_breakpoints ();
344 }
345
346 static void
347 show_stop_on_solib_events (struct ui_file *file, int from_tty,
348 struct cmd_list_element *c, const char *value)
349 {
350 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
351 value);
352 }
353
354 /* Nonzero means we are expecting a trace trap
355 and should stop the inferior and return silently when it happens. */
356
357 int stop_after_trap;
358
359 /* Save register contents here when executing a "finish" command or when
360    about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
361 Thus this contains the return value from the called function (assuming
362 values are returned in a register). */
363
364 struct regcache *stop_registers;
365
366 /* Nonzero after stop if current stack frame should be printed. */
367
368 static int stop_print_frame;
369
370 /* This is a cached copy of the pid/waitstatus of the last event
371 returned by target_wait()/deprecated_target_wait_hook(). This
372 information is returned by get_last_target_status(). */
373 static ptid_t target_last_wait_ptid;
374 static struct target_waitstatus target_last_waitstatus;
375
376 static void context_switch (ptid_t ptid);
377
378 void init_thread_stepping_state (struct thread_info *tss);
379
380 static const char follow_fork_mode_child[] = "child";
381 static const char follow_fork_mode_parent[] = "parent";
382
383 static const char *const follow_fork_mode_kind_names[] = {
384 follow_fork_mode_child,
385 follow_fork_mode_parent,
386 NULL
387 };
388
389 static const char *follow_fork_mode_string = follow_fork_mode_parent;
390 static void
391 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
392 struct cmd_list_element *c, const char *value)
393 {
394 fprintf_filtered (file,
395 _("Debugger response to a program "
396 "call of fork or vfork is \"%s\".\n"),
397 value);
398 }
399 \f
400
401 /* Handle changes to the inferior list based on the type of fork,
402 which process is being followed, and whether the other process
403 should be detached. On entry inferior_ptid must be the ptid of
404 the fork parent. At return inferior_ptid is the ptid of the
405 followed inferior. */
406
407 static int
408 follow_fork_inferior (int follow_child, int detach_fork)
409 {
410 int has_vforked;
411 int parent_pid, child_pid;
412
413 has_vforked = (inferior_thread ()->pending_follow.kind
414 == TARGET_WAITKIND_VFORKED);
415 parent_pid = ptid_get_lwp (inferior_ptid);
416 if (parent_pid == 0)
417 parent_pid = ptid_get_pid (inferior_ptid);
418 child_pid
419 = ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);
420
421 if (has_vforked
422 && !non_stop /* Non-stop always resumes both branches. */
423 && (!target_is_async_p () || sync_execution)
424 && !(follow_child || detach_fork || sched_multi))
425 {
426 /* The parent stays blocked inside the vfork syscall until the
427 child execs or exits. If we don't let the child run, then
428 the parent stays blocked. If we're telling the parent to run
429 in the foreground, the user will not be able to ctrl-c to get
430 back the terminal, effectively hanging the debug session. */
431 fprintf_filtered (gdb_stderr, _("\
432 Can not resume the parent process over vfork in the foreground while\n\
433 holding the child stopped. Try \"set detach-on-fork\" or \
434 \"set schedule-multiple\".\n"));
435 /* FIXME output string > 80 columns. */
436 return 1;
437 }
438
439 if (!follow_child)
440 {
441 /* Detach new forked process? */
442 if (detach_fork)
443 {
444 struct cleanup *old_chain;
445
446 /* Before detaching from the child, remove all breakpoints
447 from it. If we forked, then this has already been taken
448 care of by infrun.c. If we vforked however, any
449 breakpoint inserted in the parent is visible in the
450 child, even those added while stopped in a vfork
451 catchpoint. This will remove the breakpoints from the
452 parent also, but they'll be reinserted below. */
453 if (has_vforked)
454 {
455 /* Keep breakpoints list in sync. */
456 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
457 }
458
459 if (info_verbose || debug_infrun)
460 {
461 target_terminal_ours ();
462 fprintf_filtered (gdb_stdlog,
463 "Detaching after fork from "
464 "child process %d.\n",
465 child_pid);
466 }
467 }
468 else
469 {
470 struct inferior *parent_inf, *child_inf;
471 struct cleanup *old_chain;
472
473 /* Add process to GDB's tables. */
474 child_inf = add_inferior (child_pid);
475
476 parent_inf = current_inferior ();
477 child_inf->attach_flag = parent_inf->attach_flag;
478 copy_terminal_info (child_inf, parent_inf);
479 child_inf->gdbarch = parent_inf->gdbarch;
480 copy_inferior_target_desc_info (child_inf, parent_inf);
481
482 old_chain = save_inferior_ptid ();
483 save_current_program_space ();
484
485 inferior_ptid = ptid_build (child_pid, child_pid, 0);
486 add_thread (inferior_ptid);
487 child_inf->symfile_flags = SYMFILE_NO_READ;
488
489 /* If this is a vfork child, then the address-space is
490 shared with the parent. */
491 if (has_vforked)
492 {
493 child_inf->pspace = parent_inf->pspace;
494 child_inf->aspace = parent_inf->aspace;
495
496 /* The parent will be frozen until the child is done
497 with the shared region. Keep track of the
498 parent. */
499 child_inf->vfork_parent = parent_inf;
500 child_inf->pending_detach = 0;
501 parent_inf->vfork_child = child_inf;
502 parent_inf->pending_detach = 0;
503 }
504 else
505 {
506 child_inf->aspace = new_address_space ();
507 child_inf->pspace = add_program_space (child_inf->aspace);
508 child_inf->removable = 1;
509 set_current_program_space (child_inf->pspace);
510 clone_program_space (child_inf->pspace, parent_inf->pspace);
511
512 /* Let the shared library layer (e.g., solib-svr4) learn
513 about this new process, relocate the cloned exec, pull
514 in shared libraries, and install the solib event
515 breakpoint. If a "cloned-VM" event was propagated
516 better throughout the core, this wouldn't be
517 required. */
518 solib_create_inferior_hook (0);
519 }
520
521 do_cleanups (old_chain);
522 }
523
524 if (has_vforked)
525 {
526 struct inferior *parent_inf;
527
528 parent_inf = current_inferior ();
529
530 /* If we detached from the child, then we have to be careful
531 to not insert breakpoints in the parent until the child
532 is done with the shared memory region. However, if we're
533 staying attached to the child, then we can and should
534 insert breakpoints, so that we can debug it. A
535          subsequent child exec or exit is enough to know when the
536          child stops using the parent's address space.  */
537 parent_inf->waiting_for_vfork_done = detach_fork;
538 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
539 }
540 }
541 else
542 {
543 /* Follow the child. */
544 struct inferior *parent_inf, *child_inf;
545 struct program_space *parent_pspace;
546
547 if (info_verbose || debug_infrun)
548 {
549 target_terminal_ours ();
550 if (has_vforked)
551 fprintf_filtered (gdb_stdlog,
552 _("Attaching after process %d "
553 "vfork to child process %d.\n"),
554 parent_pid, child_pid);
555 else
556 fprintf_filtered (gdb_stdlog,
557 _("Attaching after process %d "
558 "fork to child process %d.\n"),
559 parent_pid, child_pid);
560 }
561
562 /* Add the new inferior first, so that the target_detach below
563 doesn't unpush the target. */
564
565 child_inf = add_inferior (child_pid);
566
567 parent_inf = current_inferior ();
568 child_inf->attach_flag = parent_inf->attach_flag;
569 copy_terminal_info (child_inf, parent_inf);
570 child_inf->gdbarch = parent_inf->gdbarch;
571 copy_inferior_target_desc_info (child_inf, parent_inf);
572
573 parent_pspace = parent_inf->pspace;
574
575 /* If we're vforking, we want to hold on to the parent until the
576 child exits or execs. At child exec or exit time we can
577 remove the old breakpoints from the parent and detach or
578 resume debugging it. Otherwise, detach the parent now; we'll
579          want to reuse its program/address spaces, but we can't set
580          them to the child before removing breakpoints from the
581          parent; otherwise the breakpoints module could decide to
582 remove breakpoints from the wrong process (since they'd be
583 assigned to the same address space). */
584
585 if (has_vforked)
586 {
587 gdb_assert (child_inf->vfork_parent == NULL);
588 gdb_assert (parent_inf->vfork_child == NULL);
589 child_inf->vfork_parent = parent_inf;
590 child_inf->pending_detach = 0;
591 parent_inf->vfork_child = child_inf;
592 parent_inf->pending_detach = detach_fork;
593 parent_inf->waiting_for_vfork_done = 0;
594 }
595 else if (detach_fork)
596 target_detach (NULL, 0);
597
598 /* Note that the detach above makes PARENT_INF dangling. */
599
600 /* Add the child thread to the appropriate lists, and switch to
601 this new thread, before cloning the program space, and
602 informing the solib layer about this new process. */
603
604 inferior_ptid = ptid_build (child_pid, child_pid, 0);
605 add_thread (inferior_ptid);
606
607 /* If this is a vfork child, then the address-space is shared
608 with the parent. If we detached from the parent, then we can
609 reuse the parent's program/address spaces. */
610 if (has_vforked || detach_fork)
611 {
612 child_inf->pspace = parent_pspace;
613 child_inf->aspace = child_inf->pspace->aspace;
614 }
615 else
616 {
617 child_inf->aspace = new_address_space ();
618 child_inf->pspace = add_program_space (child_inf->aspace);
619 child_inf->removable = 1;
620 child_inf->symfile_flags = SYMFILE_NO_READ;
621 set_current_program_space (child_inf->pspace);
622 clone_program_space (child_inf->pspace, parent_pspace);
623
624 /* Let the shared library layer (e.g., solib-svr4) learn
625 about this new process, relocate the cloned exec, pull in
626 shared libraries, and install the solib event breakpoint.
627 If a "cloned-VM" event was propagated better throughout
628 the core, this wouldn't be required. */
629 solib_create_inferior_hook (0);
630 }
631 }
632
633 return target_follow_fork (follow_child, detach_fork);
634 }
635
636 /* Tell the target to follow the fork we're stopped at. Returns true
637 if the inferior should be resumed; false, if the target for some
638 reason decided it's best not to resume. */
639
640 static int
641 follow_fork (void)
642 {
643 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
644 int should_resume = 1;
645 struct thread_info *tp;
646
647 /* Copy user stepping state to the new inferior thread. FIXME: the
648 followed fork child thread should have a copy of most of the
649 parent thread structure's run control related fields, not just these.
650 Initialized to avoid "may be used uninitialized" warnings from gcc. */
651 struct breakpoint *step_resume_breakpoint = NULL;
652 struct breakpoint *exception_resume_breakpoint = NULL;
653 CORE_ADDR step_range_start = 0;
654 CORE_ADDR step_range_end = 0;
655 struct frame_id step_frame_id = { 0 };
656 struct interp *command_interp = NULL;
657
658 if (!non_stop)
659 {
660 ptid_t wait_ptid;
661 struct target_waitstatus wait_status;
662
663 /* Get the last target status returned by target_wait(). */
664 get_last_target_status (&wait_ptid, &wait_status);
665
666 /* If not stopped at a fork event, then there's nothing else to
667 do. */
668 if (wait_status.kind != TARGET_WAITKIND_FORKED
669 && wait_status.kind != TARGET_WAITKIND_VFORKED)
670 return 1;
671
672 /* Check if we switched over from WAIT_PTID, since the event was
673 reported. */
674 if (!ptid_equal (wait_ptid, minus_one_ptid)
675 && !ptid_equal (inferior_ptid, wait_ptid))
676 {
677 /* We did. Switch back to WAIT_PTID thread, to tell the
678 target to follow it (in either direction). We'll
679 afterwards refuse to resume, and inform the user what
680 happened. */
681 switch_to_thread (wait_ptid);
682 should_resume = 0;
683 }
684 }
685
686 tp = inferior_thread ();
687
688 /* If there were any forks/vforks that were caught and are now to be
689 followed, then do so now. */
690 switch (tp->pending_follow.kind)
691 {
692 case TARGET_WAITKIND_FORKED:
693 case TARGET_WAITKIND_VFORKED:
694 {
695 ptid_t parent, child;
696
697 /* If the user did a next/step, etc, over a fork call,
698 preserve the stepping state in the fork child. */
699 if (follow_child && should_resume)
700 {
701 step_resume_breakpoint = clone_momentary_breakpoint
702 (tp->control.step_resume_breakpoint);
703 step_range_start = tp->control.step_range_start;
704 step_range_end = tp->control.step_range_end;
705 step_frame_id = tp->control.step_frame_id;
706 exception_resume_breakpoint
707 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
708 command_interp = tp->control.command_interp;
709
710 /* For now, delete the parent's sr breakpoint, otherwise,
711 parent/child sr breakpoints are considered duplicates,
712 and the child version will not be installed. Remove
713 this when the breakpoints module becomes aware of
714 inferiors and address spaces. */
715 delete_step_resume_breakpoint (tp);
716 tp->control.step_range_start = 0;
717 tp->control.step_range_end = 0;
718 tp->control.step_frame_id = null_frame_id;
719 delete_exception_resume_breakpoint (tp);
720 tp->control.command_interp = NULL;
721 }
722
723 parent = inferior_ptid;
724 child = tp->pending_follow.value.related_pid;
725
726 /* Set up inferior(s) as specified by the caller, and tell the
727 target to do whatever is necessary to follow either parent
728 or child. */
729 if (follow_fork_inferior (follow_child, detach_fork))
730 {
731 /* Target refused to follow, or there's some other reason
732 we shouldn't resume. */
733 should_resume = 0;
734 }
735 else
736 {
737 /* This pending follow fork event is now handled, one way
738              or another.  The previously selected thread may be gone
739              from the lists by now, but if it is still around, we need
740 to clear the pending follow request. */
741 tp = find_thread_ptid (parent);
742 if (tp)
743 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
744
745 /* This makes sure we don't try to apply the "Switched
746              over from WAIT_PTID" logic above.  */
747 nullify_last_target_wait_ptid ();
748
749 /* If we followed the child, switch to it... */
750 if (follow_child)
751 {
752 switch_to_thread (child);
753
754 /* ... and preserve the stepping state, in case the
755 user was stepping over the fork call. */
756 if (should_resume)
757 {
758 tp = inferior_thread ();
759 tp->control.step_resume_breakpoint
760 = step_resume_breakpoint;
761 tp->control.step_range_start = step_range_start;
762 tp->control.step_range_end = step_range_end;
763 tp->control.step_frame_id = step_frame_id;
764 tp->control.exception_resume_breakpoint
765 = exception_resume_breakpoint;
766 tp->control.command_interp = command_interp;
767 }
768 else
769 {
770 /* If we get here, it was because we're trying to
771 resume from a fork catchpoint, but, the user
772 has switched threads away from the thread that
773 forked. In that case, the resume command
774 issued is most likely not applicable to the
775 child, so just warn, and refuse to resume. */
776 warning (_("Not resuming: switched threads "
777 "before following fork child.\n"));
778 }
779
780 /* Reset breakpoints in the child as appropriate. */
781 follow_inferior_reset_breakpoints ();
782 }
783 else
784 switch_to_thread (parent);
785 }
786 }
787 break;
788 case TARGET_WAITKIND_SPURIOUS:
789 /* Nothing to follow. */
790 break;
791 default:
792 internal_error (__FILE__, __LINE__,
793 "Unexpected pending_follow.kind %d\n",
794 tp->pending_follow.kind);
795 break;
796 }
797
798 return should_resume;
799 }
800
801 static void
802 follow_inferior_reset_breakpoints (void)
803 {
804 struct thread_info *tp = inferior_thread ();
805
806 /* Was there a step_resume breakpoint? (There was if the user
807 did a "next" at the fork() call.) If so, explicitly reset its
808 thread number. Cloned step_resume breakpoints are disabled on
809 creation, so enable it here now that it is associated with the
810 correct thread.
811
812 step_resumes are a form of bp that are made to be per-thread.
813 Since we created the step_resume bp when the parent process
814 was being debugged, and now are switching to the child process,
815 from the breakpoint package's viewpoint, that's a switch of
816 "threads". We must update the bp's notion of which thread
817 it is for, or it'll be ignored when it triggers. */
818
819 if (tp->control.step_resume_breakpoint)
820 {
821 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
822 tp->control.step_resume_breakpoint->loc->enabled = 1;
823 }
824
825 /* Treat exception_resume breakpoints like step_resume breakpoints. */
826 if (tp->control.exception_resume_breakpoint)
827 {
828 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
829 tp->control.exception_resume_breakpoint->loc->enabled = 1;
830 }
831
832 /* Reinsert all breakpoints in the child. The user may have set
833 breakpoints after catching the fork, in which case those
834 were never set in the child, but only in the parent. This makes
835 sure the inserted breakpoints match the breakpoint list. */
836
837 breakpoint_re_set ();
838 insert_breakpoints ();
839 }
840
841 /* The child has exited or execed: resume threads of the parent the
842 user wanted to be executing. */
843
844 static int
845 proceed_after_vfork_done (struct thread_info *thread,
846 void *arg)
847 {
848 int pid = * (int *) arg;
849
850 if (ptid_get_pid (thread->ptid) == pid
851 && is_running (thread->ptid)
852 && !is_executing (thread->ptid)
853 && !thread->stop_requested
854 && thread->suspend.stop_signal == GDB_SIGNAL_0)
855 {
856 if (debug_infrun)
857 fprintf_unfiltered (gdb_stdlog,
858 "infrun: resuming vfork parent thread %s\n",
859 target_pid_to_str (thread->ptid));
860
861 switch_to_thread (thread->ptid);
862 clear_proceed_status (0);
863 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
864 }
865
866 return 0;
867 }
868
869 /* Called whenever we notice an exec or exit event, to handle
870 detaching or resuming a vfork parent. */
871
872 static void
873 handle_vfork_child_exec_or_exit (int exec)
874 {
875 struct inferior *inf = current_inferior ();
876
877 if (inf->vfork_parent)
878 {
879 int resume_parent = -1;
880
881 /* This exec or exit marks the end of the shared memory region
882 between the parent and the child. If the user wanted to
883 detach from the parent, now is the time. */
884
885 if (inf->vfork_parent->pending_detach)
886 {
887 struct thread_info *tp;
888 struct cleanup *old_chain;
889 struct program_space *pspace;
890 struct address_space *aspace;
891
892 /* follow-fork child, detach-on-fork on. */
893
894 inf->vfork_parent->pending_detach = 0;
895
896 if (!exec)
897 {
898 /* If we're handling a child exit, then inferior_ptid
899 points at the inferior's pid, not to a thread. */
900 old_chain = save_inferior_ptid ();
901 save_current_program_space ();
902 save_current_inferior ();
903 }
904 else
905 old_chain = save_current_space_and_thread ();
906
907 /* We're letting loose of the parent. */
908 tp = any_live_thread_of_process (inf->vfork_parent->pid);
909 switch_to_thread (tp->ptid);
910
911 /* We're about to detach from the parent, which implicitly
912 removes breakpoints from its address space. There's a
913 catch here: we want to reuse the spaces for the child,
914 but, parent/child are still sharing the pspace at this
915 point, although the exec in reality makes the kernel give
916 the child a fresh set of new pages. The problem here is
917              that the breakpoints module, being unaware of this, would
918              likely choose the child process to write to the parent
919 address space. Swapping the child temporarily away from
920 the spaces has the desired effect. Yes, this is "sort
921 of" a hack. */
922
923 pspace = inf->pspace;
924 aspace = inf->aspace;
925 inf->aspace = NULL;
926 inf->pspace = NULL;
927
928 if (debug_infrun || info_verbose)
929 {
930 target_terminal_ours ();
931
932 if (exec)
933 fprintf_filtered (gdb_stdlog,
934 "Detaching vfork parent process "
935 "%d after child exec.\n",
936 inf->vfork_parent->pid);
937 else
938 fprintf_filtered (gdb_stdlog,
939 "Detaching vfork parent process "
940 "%d after child exit.\n",
941 inf->vfork_parent->pid);
942 }
943
944 target_detach (NULL, 0);
945
946 /* Put it back. */
947 inf->pspace = pspace;
948 inf->aspace = aspace;
949
950 do_cleanups (old_chain);
951 }
952 else if (exec)
953 {
954 /* We're staying attached to the parent, so, really give the
955 child a new address space. */
956 inf->pspace = add_program_space (maybe_new_address_space ());
957 inf->aspace = inf->pspace->aspace;
958 inf->removable = 1;
959 set_current_program_space (inf->pspace);
960
961 resume_parent = inf->vfork_parent->pid;
962
963 /* Break the bonds. */
964 inf->vfork_parent->vfork_child = NULL;
965 }
966 else
967 {
968 struct cleanup *old_chain;
969 struct program_space *pspace;
970
971 /* If this is a vfork child exiting, then the pspace and
972 aspaces were shared with the parent. Since we're
973 reporting the process exit, we'll be mourning all that is
974 found in the address space, and switching to null_ptid,
975 preparing to start a new inferior. But, since we don't
976 want to clobber the parent's address/program spaces, we
977 go ahead and create a new one for this exiting
978 inferior. */
979
980 /* Switch to null_ptid, so that clone_program_space doesn't want
981 to read the selected frame of a dead process. */
982 old_chain = save_inferior_ptid ();
983 inferior_ptid = null_ptid;
984
985 /* This inferior is dead, so avoid giving the breakpoints
986 module the option to write through to it (cloning a
987 program space resets breakpoints). */
988 inf->aspace = NULL;
989 inf->pspace = NULL;
990 pspace = add_program_space (maybe_new_address_space ());
991 set_current_program_space (pspace);
992 inf->removable = 1;
993 inf->symfile_flags = SYMFILE_NO_READ;
994 clone_program_space (pspace, inf->vfork_parent->pspace);
995 inf->pspace = pspace;
996 inf->aspace = pspace->aspace;
997
998 /* Put back inferior_ptid. We'll continue mourning this
999 inferior. */
1000 do_cleanups (old_chain);
1001
1002 resume_parent = inf->vfork_parent->pid;
1003 /* Break the bonds. */
1004 inf->vfork_parent->vfork_child = NULL;
1005 }
1006
1007 inf->vfork_parent = NULL;
1008
1009 gdb_assert (current_program_space == inf->pspace);
1010
1011 if (non_stop && resume_parent != -1)
1012 {
1013 /* If the user wanted the parent to be running, let it go
1014 free now. */
1015 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
1016
1017 if (debug_infrun)
1018 fprintf_unfiltered (gdb_stdlog,
1019 "infrun: resuming vfork parent process %d\n",
1020 resume_parent);
1021
1022 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
1023
1024 do_cleanups (old_chain);
1025 }
1026 }
1027 }
1028
1029 /* Enum strings for "set|show follow-exec-mode". */
1030
1031 static const char follow_exec_mode_new[] = "new";
1032 static const char follow_exec_mode_same[] = "same";
1033 static const char *const follow_exec_mode_names[] =
1034 {
1035 follow_exec_mode_new,
1036 follow_exec_mode_same,
1037 NULL,
1038 };
1039
1040 static const char *follow_exec_mode_string = follow_exec_mode_same;
1041 static void
1042 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
1043 struct cmd_list_element *c, const char *value)
1044 {
1045 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
1046 }
1047
1048 /* EXECD_PATHNAME is assumed to be non-NULL. */
1049
1050 static void
1051 follow_exec (ptid_t pid, char *execd_pathname)
1052 {
1053 struct thread_info *th = inferior_thread ();
1054 struct inferior *inf = current_inferior ();
1055
1056 /* This is an exec event that we actually wish to pay attention to.
1057 Refresh our symbol table to the newly exec'd program, remove any
1058 momentary bp's, etc.
1059
1060 If there are breakpoints, they aren't really inserted now,
1061 since the exec() transformed our inferior into a fresh set
1062 of instructions.
1063
1064 We want to preserve symbolic breakpoints on the list, since
1065 we have hopes that they can be reset after the new a.out's
1066 symbol table is read.
1067
1068 However, any "raw" breakpoints must be removed from the list
1069 (e.g., the solib bp's), since their address is probably invalid
1070 now.
1071
1072 And, we DON'T want to call delete_breakpoints() here, since
1073 that may write the bp's "shadow contents" (the instruction
1074      value that was overwritten with a TRAP instruction).  Since
1075 we now have a new a.out, those shadow contents aren't valid. */
1076
1077 mark_breakpoints_out ();
1078
1079 update_breakpoints_after_exec ();
1080
1081 /* If there was one, it's gone now. We cannot truly step-to-next
1082 statement through an exec(). */
1083 th->control.step_resume_breakpoint = NULL;
1084 th->control.exception_resume_breakpoint = NULL;
1085 th->control.single_step_breakpoints = NULL;
1086 th->control.step_range_start = 0;
1087 th->control.step_range_end = 0;
1088
1089 /* The target reports the exec event to the main thread, even if
1090 some other thread does the exec, and even if the main thread was
1091 already stopped --- if debugging in non-stop mode, it's possible
1092 the user had the main thread held stopped in the previous image
1093 --- release it now. This is the same behavior as step-over-exec
1094 with scheduler-locking on in all-stop mode. */
1095 th->stop_requested = 0;
1096
1097 /* What is this a.out's name? */
1098 printf_unfiltered (_("%s is executing new program: %s\n"),
1099 target_pid_to_str (inferior_ptid),
1100 execd_pathname);
1101
1102 /* We've followed the inferior through an exec. Therefore, the
1103 inferior has essentially been killed & reborn. */
1104
1105 gdb_flush (gdb_stdout);
1106
1107 breakpoint_init_inferior (inf_execd);
1108
1109 if (gdb_sysroot && *gdb_sysroot)
1110 {
1111 char *name = alloca (strlen (gdb_sysroot)
1112 + strlen (execd_pathname)
1113 + 1);
1114
1115 strcpy (name, gdb_sysroot);
1116 strcat (name, execd_pathname);
1117 execd_pathname = name;
1118 }
1119
1120 /* Reset the shared library package. This ensures that we get a
1121 shlib event when the child reaches "_start", at which point the
1122 dld will have had a chance to initialize the child. */
1123 /* Also, loading a symbol file below may trigger symbol lookups, and
1124 we don't want those to be satisfied by the libraries of the
1125 previous incarnation of this process. */
1126 no_shared_libraries (NULL, 0);
1127
1128 if (follow_exec_mode_string == follow_exec_mode_new)
1129 {
1130 struct program_space *pspace;
1131
1132 /* The user wants to keep the old inferior and program spaces
1133 around. Create a new fresh one, and switch to it. */
1134
1135 inf = add_inferior (current_inferior ()->pid);
1136 pspace = add_program_space (maybe_new_address_space ());
1137 inf->pspace = pspace;
1138 inf->aspace = pspace->aspace;
1139
1140 exit_inferior_num_silent (current_inferior ()->num);
1141
1142 set_current_inferior (inf);
1143 set_current_program_space (pspace);
1144 }
1145 else
1146 {
1147 /* The old description may no longer be fit for the new image.
1148 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
1149 old description; we'll read a new one below. No need to do
1150 this on "follow-exec-mode new", as the old inferior stays
1151 around (its description is later cleared/refetched on
1152 restart). */
1153 target_clear_description ();
1154 }
1155
1156 gdb_assert (current_program_space == inf->pspace);
1157
1158 /* That a.out is now the one to use. */
1159 exec_file_attach (execd_pathname, 0);
1160
1161   /* SYMFILE_DEFER_BP_RESET is used here because the proper displacement for a
1162      PIE (Position Independent Executable) main symbol file will only be applied
1163      by solib_create_inferior_hook below.  breakpoint_re_set would fail to insert
1164      the breakpoints with the zero displacement.  */
1165
1166 symbol_file_add (execd_pathname,
1167 (inf->symfile_flags
1168 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
1169 NULL, 0);
1170
1171 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
1172 set_initial_language ();
1173
1174 /* If the target can specify a description, read it. Must do this
1175 after flipping to the new executable (because the target supplied
1176 description must be compatible with the executable's
1177 architecture, and the old executable may e.g., be 32-bit, while
1178 the new one 64-bit), and before anything involving memory or
1179 registers. */
1180 target_find_description ();
1181
1182 solib_create_inferior_hook (0);
1183
1184 jit_inferior_created_hook ();
1185
1186 breakpoint_re_set ();
1187
1188 /* Reinsert all breakpoints. (Those which were symbolic have
1189 been reset to the proper address in the new a.out, thanks
1190 to symbol_file_command...). */
1191 insert_breakpoints ();
1192
1193 /* The next resume of this inferior should bring it to the shlib
1194 startup breakpoints. (If the user had also set bp's on
1195 "main" from the old (parent) process, then they'll auto-
1196 matically get reset there in the new process.). */
1197 }
1198
1199 /* Info about an instruction that is being stepped over. */
1200
1201 struct step_over_info
1202 {
1203 /* If we're stepping past a breakpoint, this is the address space
1204 and address of the instruction the breakpoint is set at. We'll
1205 skip inserting all breakpoints here. Valid iff ASPACE is
1206 non-NULL. */
1207 struct address_space *aspace;
1208 CORE_ADDR address;
1209
1210 /* The instruction being stepped over triggers a nonsteppable
1211 watchpoint. If true, we'll skip inserting watchpoints. */
1212 int nonsteppable_watchpoint_p;
1213 };
1214
1215 /* The step-over info of the location that is being stepped over.
1216
1217 Note that with async/breakpoint always-inserted mode, a user might
1218 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
1219 being stepped over. As setting a new breakpoint inserts all
1220 breakpoints, we need to make sure the breakpoint being stepped over
1221 isn't inserted then. We do that by only clearing the step-over
1222 info when the step-over is actually finished (or aborted).
1223
1224 Presently GDB can only step over one breakpoint at any given time.
1225    Given that threads which can't run code in the same address space as the
1226    breakpoint can't really miss the breakpoint, GDB could be taught
1227 to step-over at most one breakpoint per address space (so this info
1228 could move to the address space object if/when GDB is extended).
1229 The set of breakpoints being stepped over will normally be much
1230 smaller than the set of all breakpoints, so a flag in the
1231 breakpoint location structure would be wasteful. A separate list
1232 also saves complexity and run-time, as otherwise we'd have to go
1233 through all breakpoint locations clearing their flag whenever we
1234 start a new sequence. Similar considerations weigh against storing
1235 this info in the thread object. Plus, not all step overs actually
1236 have breakpoint locations -- e.g., stepping past a single-step
1237 breakpoint, or stepping to complete a non-continuable
1238 watchpoint. */
1239 static struct step_over_info step_over_info;
1240
1241 /* Record the address of the breakpoint/instruction we're currently
1242 stepping over. */
1243
1244 static void
1245 set_step_over_info (struct address_space *aspace, CORE_ADDR address,
1246 int nonsteppable_watchpoint_p)
1247 {
1248 step_over_info.aspace = aspace;
1249 step_over_info.address = address;
1250 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1251 }
1252
1253 /* Called when we're no longer stepping over a breakpoint / an
1254 instruction, so all breakpoints are free to be (re)inserted. */
1255
1256 static void
1257 clear_step_over_info (void)
1258 {
1259 step_over_info.aspace = NULL;
1260 step_over_info.address = 0;
1261 step_over_info.nonsteppable_watchpoint_p = 0;
1262 }
1263
1264 /* See infrun.h. */
1265
1266 int
1267 stepping_past_instruction_at (struct address_space *aspace,
1268 CORE_ADDR address)
1269 {
1270 return (step_over_info.aspace != NULL
1271 && breakpoint_address_match (aspace, address,
1272 step_over_info.aspace,
1273 step_over_info.address));
1274 }
1275
1276 /* See infrun.h. */
1277
1278 int
1279 stepping_past_nonsteppable_watchpoint (void)
1280 {
1281 return step_over_info.nonsteppable_watchpoint_p;
1282 }
1283
1284 /* Returns true if step-over info is valid. */
1285
1286 static int
1287 step_over_info_valid_p (void)
1288 {
1289 return (step_over_info.aspace != NULL
1290 || stepping_past_nonsteppable_watchpoint ());
1291 }
1292
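/* Illustrative sketch (an assumption, not original code): the intended
   life cycle of the step-over info above.  The caller records the
   breakpoint's location before single-stepping past it, so that
   breakpoint insertion (which consults stepping_past_instruction_at)
   skips that one location, and only clears the info once the step-over
   has actually finished or been aborted.  */

static void
example_step_over_lifecycle (struct address_space *aspace, CORE_ADDR bp_addr)
{
  /* Record what we are stepping over; from now on, inserting all
     breakpoints leaves BP_ADDR alone.  */
  set_step_over_info (aspace, bp_addr, 0);

  /* ... single-step the thread past BP_ADDR and wait for it ... */

  /* The step-over is done (or abandoned); breakpoints are again free
     to be (re)inserted at BP_ADDR.  */
  clear_step_over_info ();
}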
1293 \f
1294 /* Displaced stepping. */
1295
1296 /* In non-stop debugging mode, we must take special care to manage
1297 breakpoints properly; in particular, the traditional strategy for
1298 stepping a thread past a breakpoint it has hit is unsuitable.
1299 'Displaced stepping' is a tactic for stepping one thread past a
1300 breakpoint it has hit while ensuring that other threads running
1301 concurrently will hit the breakpoint as they should.
1302
1303 The traditional way to step a thread T off a breakpoint in a
1304 multi-threaded program in all-stop mode is as follows:
1305
1306 a0) Initially, all threads are stopped, and breakpoints are not
1307 inserted.
1308 a1) We single-step T, leaving breakpoints uninserted.
1309 a2) We insert breakpoints, and resume all threads.
1310
1311 In non-stop debugging, however, this strategy is unsuitable: we
1312 don't want to have to stop all threads in the system in order to
1313 continue or step T past a breakpoint. Instead, we use displaced
1314 stepping:
1315
1316 n0) Initially, T is stopped, other threads are running, and
1317 breakpoints are inserted.
1318 n1) We copy the instruction "under" the breakpoint to a separate
1319 location, outside the main code stream, making any adjustments
1320 to the instruction, register, and memory state as directed by
1321 T's architecture.
1322 n2) We single-step T over the instruction at its new location.
1323 n3) We adjust the resulting register and memory state as directed
1324 by T's architecture. This includes resetting T's PC to point
1325 back into the main instruction stream.
1326 n4) We resume T.
1327
1328 This approach depends on the following gdbarch methods:
1329
1330 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1331 indicate where to copy the instruction, and how much space must
1332 be reserved there. We use these in step n1.
1333
1334    - gdbarch_displaced_step_copy_insn copies an instruction to a new
1335 address, and makes any necessary adjustments to the instruction,
1336 register contents, and memory. We use this in step n1.
1337
1338 - gdbarch_displaced_step_fixup adjusts registers and memory after
1339      we have successfully single-stepped the instruction, to yield the
1340 same effect the instruction would have had if we had executed it
1341 at its original address. We use this in step n3.
1342
1343 - gdbarch_displaced_step_free_closure provides cleanup.
1344
1345 The gdbarch_displaced_step_copy_insn and
1346 gdbarch_displaced_step_fixup functions must be written so that
1347 copying an instruction with gdbarch_displaced_step_copy_insn,
1348 single-stepping across the copied instruction, and then applying
1349    gdbarch_displaced_step_fixup should have the same effects on the
1350 thread's memory and registers as stepping the instruction in place
1351 would have. Exactly which responsibilities fall to the copy and
1352 which fall to the fixup is up to the author of those functions.
1353
1354 See the comments in gdbarch.sh for details.
1355
1356 Note that displaced stepping and software single-step cannot
1357 currently be used in combination, although with some care I think
1358 they could be made to. Software single-step works by placing
1359 breakpoints on all possible subsequent instructions; if the
1360 displaced instruction is a PC-relative jump, those breakpoints
1361 could fall in very strange places --- on pages that aren't
1362 executable, or at addresses that are not proper instruction
1363 boundaries. (We do generally let other threads run while we wait
1364 to hit the software single-step breakpoint, and they might
1365 encounter such a corrupted instruction.) One way to work around
1366 this would be to have gdbarch_displaced_step_copy_insn fully
1367 simulate the effect of PC-relative instructions (and return NULL)
1368 on architectures that use software single-stepping.
1369
1370 In non-stop mode, we can have independent and simultaneous step
1371 requests, so more than one thread may need to simultaneously step
1372 over a breakpoint. The current implementation assumes there is
1373 only one scratch space per process. In this case, we have to
1374 serialize access to the scratch space. If thread A wants to step
1375 over a breakpoint, but we are currently waiting for some other
1376 thread to complete a displaced step, we leave thread A stopped and
1377 place it in the displaced_step_request_queue. Whenever a displaced
1378 step finishes, we pick the next thread in the queue and start a new
1379 displaced step operation on it. See displaced_step_prepare and
1380 displaced_step_fixup for details. */
1381
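/* Minimal sketch of the n1-n4 sequence described above, expressed with
   the gdbarch hooks it names.  This is illustrative only and an
   assumption about usage; the real implementation is in
   displaced_step_prepare and displaced_step_fixup below, and actually
   resuming the thread and waiting for the single-step to finish (n2)
   is elided here.  */

static void
example_displaced_step (struct gdbarch *gdbarch, struct regcache *regcache)
{
  /* n1: copy the original instruction to the scratch location.  */
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  struct displaced_step_closure *closure
    = gdbarch_displaced_step_copy_insn (gdbarch, original, copy, regcache);

  /* n2: single-step the copy.  */
  regcache_write_pc (regcache, copy);
  /* ... resume the thread for one instruction and wait for it ... */

  /* n3: fix up registers/memory as if the instruction had run at its
     original address, then release the closure.  */
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy, regcache);
  gdbarch_displaced_step_free_closure (gdbarch, closure);

  /* n4: the thread can now be resumed normally.  */
}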
1382 struct displaced_step_request
1383 {
1384 ptid_t ptid;
1385 struct displaced_step_request *next;
1386 };
1387
1388 /* Per-inferior displaced stepping state. */
1389 struct displaced_step_inferior_state
1390 {
1391 /* Pointer to next in linked list. */
1392 struct displaced_step_inferior_state *next;
1393
1394 /* The process this displaced step state refers to. */
1395 int pid;
1396
1397 /* A queue of pending displaced stepping requests. One entry per
1398 thread that needs to do a displaced step. */
1399 struct displaced_step_request *step_request_queue;
1400
1401 /* If this is not null_ptid, this is the thread carrying out a
1402 displaced single-step in process PID. This thread's state will
1403 require fixing up once it has completed its step. */
1404 ptid_t step_ptid;
1405
1406 /* The architecture the thread had when we stepped it. */
1407 struct gdbarch *step_gdbarch;
1408
1409   /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1410 for post-step cleanup. */
1411 struct displaced_step_closure *step_closure;
1412
1413 /* The address of the original instruction, and the copy we
1414 made. */
1415 CORE_ADDR step_original, step_copy;
1416
1417 /* Saved contents of copy area. */
1418 gdb_byte *step_saved_copy;
1419 };
1420
1421 /* The list of states of processes presently involved in displaced
1422    stepping.  */
1423 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1424
1425 /* Get the displaced stepping state of process PID. */
1426
1427 static struct displaced_step_inferior_state *
1428 get_displaced_stepping_state (int pid)
1429 {
1430 struct displaced_step_inferior_state *state;
1431
1432 for (state = displaced_step_inferior_states;
1433 state != NULL;
1434 state = state->next)
1435 if (state->pid == pid)
1436 return state;
1437
1438 return NULL;
1439 }
1440
1441 /* Add a new displaced stepping state for process PID to the displaced
1442    stepping state list, or return a pointer to the existing entry
1443    if one already exists.  Never returns NULL.  */
1444
1445 static struct displaced_step_inferior_state *
1446 add_displaced_stepping_state (int pid)
1447 {
1448 struct displaced_step_inferior_state *state;
1449
1450 for (state = displaced_step_inferior_states;
1451 state != NULL;
1452 state = state->next)
1453 if (state->pid == pid)
1454 return state;
1455
1456 state = xcalloc (1, sizeof (*state));
1457 state->pid = pid;
1458 state->next = displaced_step_inferior_states;
1459 displaced_step_inferior_states = state;
1460
1461 return state;
1462 }
1463
1464 /* If the inferior is in displaced stepping, and ADDR equals the starting
1465    address of the copy area, return the corresponding displaced_step_closure.
1466    Otherwise, return NULL.  */
1467
1468 struct displaced_step_closure*
1469 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1470 {
1471 struct displaced_step_inferior_state *displaced
1472 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1473
1474   /* Check whether ADDR is the copy area of an in-progress displaced step.  */
1475 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1476 && (displaced->step_copy == addr))
1477 return displaced->step_closure;
1478
1479 return NULL;
1480 }
1481
1482 /* Remove the displaced stepping state of process PID. */
1483
1484 static void
1485 remove_displaced_stepping_state (int pid)
1486 {
1487 struct displaced_step_inferior_state *it, **prev_next_p;
1488
1489 gdb_assert (pid != 0);
1490
1491 it = displaced_step_inferior_states;
1492 prev_next_p = &displaced_step_inferior_states;
1493 while (it)
1494 {
1495 if (it->pid == pid)
1496 {
1497 *prev_next_p = it->next;
1498 xfree (it);
1499 return;
1500 }
1501
1502 prev_next_p = &it->next;
1503 it = *prev_next_p;
1504 }
1505 }
1506
1507 static void
1508 infrun_inferior_exit (struct inferior *inf)
1509 {
1510 remove_displaced_stepping_state (inf->pid);
1511 }
1512
1513 /* If ON, and the architecture supports it, GDB will use displaced
1514 stepping to step over breakpoints. If OFF, or if the architecture
1515 doesn't support it, GDB will instead use the traditional
1516 hold-and-step approach. If AUTO (which is the default), GDB will
1517 decide which technique to use to step over breakpoints depending on
1518 which of all-stop or non-stop mode is active --- displaced stepping
1519 in non-stop mode; hold-and-step in all-stop mode. */
1520
1521 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1522
1523 static void
1524 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1525 struct cmd_list_element *c,
1526 const char *value)
1527 {
1528 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1529 fprintf_filtered (file,
1530 _("Debugger's willingness to use displaced stepping "
1531 "to step over breakpoints is %s (currently %s).\n"),
1532 value, non_stop ? "on" : "off");
1533 else
1534 fprintf_filtered (file,
1535 _("Debugger's willingness to use displaced stepping "
1536 "to step over breakpoints is %s.\n"), value);
1537 }
1538
1539 /* Return non-zero if displaced stepping can/should be used to step
1540 over breakpoints. */
1541
1542 static int
1543 use_displaced_stepping (struct gdbarch *gdbarch)
1544 {
1545 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1546 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1547 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1548 && find_record_target () == NULL);
1549 }
1550
1551 /* Clean out any stray displaced stepping state. */
1552 static void
1553 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1554 {
1555 /* Indicate that there is no cleanup pending. */
1556 displaced->step_ptid = null_ptid;
1557
1558 if (displaced->step_closure)
1559 {
1560 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1561 displaced->step_closure);
1562 displaced->step_closure = NULL;
1563 }
1564 }
1565
1566 static void
1567 displaced_step_clear_cleanup (void *arg)
1568 {
1569 struct displaced_step_inferior_state *state = arg;
1570
1571 displaced_step_clear (state);
1572 }
1573
1574 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1575 void
1576 displaced_step_dump_bytes (struct ui_file *file,
1577 const gdb_byte *buf,
1578 size_t len)
1579 {
1580 int i;
1581
1582 for (i = 0; i < len; i++)
1583 fprintf_unfiltered (file, "%02x ", buf[i]);
1584 fputs_unfiltered ("\n", file);
1585 }
1586
1587 /* Prepare to single-step, using displaced stepping.
1588
1589 Note that we cannot use displaced stepping when we have a signal to
1590 deliver. If we have a signal to deliver and an instruction to step
1591 over, then after the step, there will be no indication from the
1592 target whether the thread entered a signal handler or ignored the
1593 signal and stepped over the instruction successfully --- both cases
1594 result in a simple SIGTRAP. In the first case we mustn't do a
1595 fixup, and in the second case we must --- but we can't tell which.
1596 Comments in the code for 'random signals' in handle_inferior_event
1597 explain how we handle this case instead.
1598
1599 Returns 1 if preparing was successful -- this thread is going to be
1600 stepped now; or 0 if displaced stepping this thread got queued. */
1601 static int
1602 displaced_step_prepare (ptid_t ptid)
1603 {
1604 struct cleanup *old_cleanups, *ignore_cleanups;
1605 struct thread_info *tp = find_thread_ptid (ptid);
1606 struct regcache *regcache = get_thread_regcache (ptid);
1607 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1608 CORE_ADDR original, copy;
1609 ULONGEST len;
1610 struct displaced_step_closure *closure;
1611 struct displaced_step_inferior_state *displaced;
1612 int status;
1613
1614 /* We should never reach this function if the architecture does not
1615 support displaced stepping. */
1616 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1617
1618 /* Disable range stepping while executing in the scratch pad. We
1619 want a single-step even if executing the displaced instruction in
1620 the scratch buffer lands within the stepping range (e.g., a
1621 jump/branch). */
1622 tp->control.may_range_step = 0;
1623
1624 /* We have to displaced step one thread at a time, as we only have
1625 access to a single scratch space per inferior. */
1626
1627 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1628
1629 if (!ptid_equal (displaced->step_ptid, null_ptid))
1630 {
1631 /* Already waiting for a displaced step to finish. Defer this
1632 request and place in queue. */
1633 struct displaced_step_request *req, *new_req;
1634
1635 if (debug_displaced)
1636 fprintf_unfiltered (gdb_stdlog,
1637 "displaced: defering step of %s\n",
1638 target_pid_to_str (ptid));
1639
1640 new_req = xmalloc (sizeof (*new_req));
1641 new_req->ptid = ptid;
1642 new_req->next = NULL;
1643
1644 if (displaced->step_request_queue)
1645 {
1646 for (req = displaced->step_request_queue;
1647 req && req->next;
1648 req = req->next)
1649 ;
1650 req->next = new_req;
1651 }
1652 else
1653 displaced->step_request_queue = new_req;
1654
1655 return 0;
1656 }
1657 else
1658 {
1659 if (debug_displaced)
1660 fprintf_unfiltered (gdb_stdlog,
1661 "displaced: stepping %s now\n",
1662 target_pid_to_str (ptid));
1663 }
1664
1665 displaced_step_clear (displaced);
1666
1667 old_cleanups = save_inferior_ptid ();
1668 inferior_ptid = ptid;
1669
1670 original = regcache_read_pc (regcache);
1671
1672 copy = gdbarch_displaced_step_location (gdbarch);
1673 len = gdbarch_max_insn_length (gdbarch);
1674
1675 /* Save the original contents of the copy area. */
1676 displaced->step_saved_copy = xmalloc (len);
1677 ignore_cleanups = make_cleanup (free_current_contents,
1678 &displaced->step_saved_copy);
1679 status = target_read_memory (copy, displaced->step_saved_copy, len);
1680 if (status != 0)
1681 throw_error (MEMORY_ERROR,
1682 _("Error accessing memory address %s (%s) for "
1683 "displaced-stepping scratch space."),
1684 paddress (gdbarch, copy), safe_strerror (status));
1685 if (debug_displaced)
1686 {
1687 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1688 paddress (gdbarch, copy));
1689 displaced_step_dump_bytes (gdb_stdlog,
1690 displaced->step_saved_copy,
1691 len);
1692 }
1693
1694 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1695 original, copy, regcache);
1696
1697 /* We don't support the fully-simulated case at present. */
1698 gdb_assert (closure);
1699
1700 /* Save the information we need to fix things up if the step
1701 succeeds. */
1702 displaced->step_ptid = ptid;
1703 displaced->step_gdbarch = gdbarch;
1704 displaced->step_closure = closure;
1705 displaced->step_original = original;
1706 displaced->step_copy = copy;
1707
1708 make_cleanup (displaced_step_clear_cleanup, displaced);
1709
1710 /* Resume execution at the copy. */
1711 regcache_write_pc (regcache, copy);
1712
1713 discard_cleanups (ignore_cleanups);
1714
1715 do_cleanups (old_cleanups);
1716
1717 if (debug_displaced)
1718 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1719 paddress (gdbarch, copy));
1720
1721 return 1;
1722 }
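
/* In sketch form, a caller that wants to step thread PTID over a
   breakpoint uses the return value roughly as the queued-request
   handling in displaced_step_fixup below does:

       if (displaced_step_prepare (ptid))
	 target_resume (ptid, use_hw_step, GDB_SIGNAL_0);

   where USE_HW_STEP would come from gdbarch_displaced_step_hw_singlestep.
   A zero return means the request was queued and the thread will be
   resumed later, from displaced_step_fixup, once the scratch pad frees
   up.  When the call returns 1, the PC of PTID already points at the
   scratch pad copy.  */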
1723
1724 static void
1725 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1726 const gdb_byte *myaddr, int len)
1727 {
1728 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1729
1730 inferior_ptid = ptid;
1731 write_memory (memaddr, myaddr, len);
1732 do_cleanups (ptid_cleanup);
1733 }
1734
1735 /* Restore the contents of the copy area for thread PTID. */
1736
1737 static void
1738 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1739 ptid_t ptid)
1740 {
1741 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1742
1743 write_memory_ptid (ptid, displaced->step_copy,
1744 displaced->step_saved_copy, len);
1745 if (debug_displaced)
1746 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1747 target_pid_to_str (ptid),
1748 paddress (displaced->step_gdbarch,
1749 displaced->step_copy));
1750 }
1751
1752 static void
1753 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1754 {
1755 struct cleanup *old_cleanups;
1756 struct displaced_step_inferior_state *displaced
1757 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1758
1759 /* Was any thread of this process doing a displaced step? */
1760 if (displaced == NULL)
1761 return;
1762
1763 /* Was this event for the pid we displaced? */
1764 if (ptid_equal (displaced->step_ptid, null_ptid)
1765 || ! ptid_equal (displaced->step_ptid, event_ptid))
1766 return;
1767
1768 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1769
1770 displaced_step_restore (displaced, displaced->step_ptid);
1771
1772 /* Did the instruction complete successfully? */
1773 if (signal == GDB_SIGNAL_TRAP)
1774 {
1775 /* Fix up the resulting state. */
1776 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1777 displaced->step_closure,
1778 displaced->step_original,
1779 displaced->step_copy,
1780 get_thread_regcache (displaced->step_ptid));
1781 }
1782 else
1783 {
1784 /* Since the instruction didn't complete, all we can do is
1785 relocate the PC. */
1786 struct regcache *regcache = get_thread_regcache (event_ptid);
1787 CORE_ADDR pc = regcache_read_pc (regcache);
1788
1789 pc = displaced->step_original + (pc - displaced->step_copy);
1790 regcache_write_pc (regcache, pc);
1791 }
1792
1793 do_cleanups (old_cleanups);
1794
1795 displaced->step_ptid = null_ptid;
1796
1797 /* Are there any pending displaced stepping requests? If so, run
1798 one now. Leave the state object around, since we're likely to
1799 need it again soon. */
1800 while (displaced->step_request_queue)
1801 {
1802 struct displaced_step_request *head;
1803 ptid_t ptid;
1804 struct regcache *regcache;
1805 struct gdbarch *gdbarch;
1806 CORE_ADDR actual_pc;
1807 struct address_space *aspace;
1808
1809 head = displaced->step_request_queue;
1810 ptid = head->ptid;
1811 displaced->step_request_queue = head->next;
1812 xfree (head);
1813
1814 context_switch (ptid);
1815
1816 regcache = get_thread_regcache (ptid);
1817 actual_pc = regcache_read_pc (regcache);
1818 aspace = get_regcache_aspace (regcache);
1819
1820 if (breakpoint_here_p (aspace, actual_pc))
1821 {
1822 if (debug_displaced)
1823 fprintf_unfiltered (gdb_stdlog,
1824 "displaced: stepping queued %s now\n",
1825 target_pid_to_str (ptid));
1826
1827 displaced_step_prepare (ptid);
1828
1829 gdbarch = get_regcache_arch (regcache);
1830
1831 if (debug_displaced)
1832 {
1833 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1834 gdb_byte buf[4];
1835
1836 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1837 paddress (gdbarch, actual_pc));
1838 read_memory (actual_pc, buf, sizeof (buf));
1839 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1840 }
1841
1842 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1843 displaced->step_closure))
1844 target_resume (ptid, 1, GDB_SIGNAL_0);
1845 else
1846 target_resume (ptid, 0, GDB_SIGNAL_0);
1847
1848 /* Done, we're stepping a thread. */
1849 break;
1850 }
1851 else
1852 {
1853 int step;
1854 struct thread_info *tp = inferior_thread ();
1855
1856 /* The breakpoint we were sitting under has since been
1857 removed. */
1858 tp->control.trap_expected = 0;
1859
1860 /* Go back to what we were trying to do. */
1861 step = currently_stepping (tp);
1862
1863 if (debug_displaced)
1864 fprintf_unfiltered (gdb_stdlog,
1865 "displaced: breakpoint is gone: %s, step(%d)\n",
1866 target_pid_to_str (tp->ptid), step);
1867
1868 target_resume (ptid, step, GDB_SIGNAL_0);
1869 tp->suspend.stop_signal = GDB_SIGNAL_0;
1870
1871 /* This request was discarded. See if there's any other
1872 thread waiting for its turn. */
1873 }
1874 }
1875 }
1876
1877 /* Update global variables holding ptids to hold NEW_PTID if they were
1878 holding OLD_PTID. */
1879 static void
1880 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1881 {
1882 struct displaced_step_request *it;
1883 struct displaced_step_inferior_state *displaced;
1884
1885 if (ptid_equal (inferior_ptid, old_ptid))
1886 inferior_ptid = new_ptid;
1887
1888 for (displaced = displaced_step_inferior_states;
1889 displaced;
1890 displaced = displaced->next)
1891 {
1892 if (ptid_equal (displaced->step_ptid, old_ptid))
1893 displaced->step_ptid = new_ptid;
1894
1895 for (it = displaced->step_request_queue; it; it = it->next)
1896 if (ptid_equal (it->ptid, old_ptid))
1897 it->ptid = new_ptid;
1898 }
1899 }
1900
1901 \f
1902 /* Resuming. */
1903
1904 /* Things to clean up if we QUIT out of resume (). */
1905 static void
1906 resume_cleanups (void *ignore)
1907 {
1908 if (!ptid_equal (inferior_ptid, null_ptid))
1909 delete_single_step_breakpoints (inferior_thread ());
1910
1911 normal_stop ();
1912 }
1913
1914 static const char schedlock_off[] = "off";
1915 static const char schedlock_on[] = "on";
1916 static const char schedlock_step[] = "step";
1917 static const char *const scheduler_enums[] = {
1918 schedlock_off,
1919 schedlock_on,
1920 schedlock_step,
1921 NULL
1922 };
1923 static const char *scheduler_mode = schedlock_off;
1924 static void
1925 show_scheduler_mode (struct ui_file *file, int from_tty,
1926 struct cmd_list_element *c, const char *value)
1927 {
1928 fprintf_filtered (file,
1929 _("Mode for locking scheduler "
1930 "during execution is \"%s\".\n"),
1931 value);
1932 }
1933
1934 static void
1935 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1936 {
1937 if (!target_can_lock_scheduler)
1938 {
1939 scheduler_mode = schedlock_off;
1940 error (_("Target '%s' cannot support this command."), target_shortname);
1941 }
1942 }
1943
1944 /* True if execution commands resume all threads of all processes by
1945 default; otherwise, resume only threads of the current inferior
1946 process. */
1947 int sched_multi = 0;
1948
1949 /* Try to set up software single-stepping over the specified location.
1950 Return 1 if target_resume() should use hardware single step.
1951
1952 GDBARCH the current gdbarch.
1953 PC the location to step over. */
1954
1955 static int
1956 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1957 {
1958 int hw_step = 1;
1959
1960 if (execution_direction == EXEC_FORWARD
1961 && gdbarch_software_single_step_p (gdbarch)
1962 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1963 {
1964 hw_step = 0;
1965 }
1966 return hw_step;
1967 }
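
/* In sketch form, the non-displaced-stepping path in `resume' below
   uses this as:

       if (step)
	 step = maybe_software_singlestep (gdbarch, pc);
       ...
       target_resume (resume_ptid, step, sig);

   i.e. once software single-step breakpoints have been planted, the
   actual request to the target decays from a step into a continue.  */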
1968
1969 ptid_t
1970 user_visible_resume_ptid (int step)
1971 {
1972 /* By default, resume all threads of all processes. */
1973 ptid_t resume_ptid = RESUME_ALL;
1974
1975 /* Maybe resume only all threads of the current process. */
1976 if (!sched_multi && target_supports_multi_process ())
1977 {
1978 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1979 }
1980
1981 /* Maybe resume a single thread after all. */
1982 if (non_stop)
1983 {
1984 /* With non-stop mode on, threads are always handled
1985 individually. */
1986 resume_ptid = inferior_ptid;
1987 }
1988 else if ((scheduler_mode == schedlock_on)
1989 || (scheduler_mode == schedlock_step && step))
1990 {
1991 /* User-settable 'scheduler' mode requires solo thread resume. */
1992 resume_ptid = inferior_ptid;
1993 }
1994
1995 /* We may actually resume fewer threads at first, e.g., if a thread
1996 is stopped at a breakpoint that needs stepping-off, but that
1997 should not be visible to the user/frontend, and neither should
1998 the frontend/user be allowed to proceed any of the threads that
1999 happen to be stopped for internal run control handling, if a
2000 previous command wanted them resumed. */
2001 return resume_ptid;
2002 }
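
/* For example (sketch; all-stop mode, target_supports_multi_process):

       "set scheduler-locking off",  continue  => all threads of the
					          current process
       "set scheduler-locking step", step      => inferior_ptid only
       non-stop mode, any resumption           => inferior_ptid only

   "set scheduler-locking on" restricts every resumption to
   inferior_ptid, whether stepping or not.  */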
2003
2004 /* Resume the inferior, but allow a QUIT. This is useful if the user
2005 wants to interrupt some lengthy single-stepping operation
2006 (for child processes, the SIGINT goes to the inferior, and so
2007 we get a SIGINT random_signal, but for remote debugging and perhaps
2008 other targets, that's not true).
2009
2010 STEP nonzero if we should step (zero to continue instead).
2011 SIG is the signal to give the inferior (zero for none). */
2012 void
2013 resume (int step, enum gdb_signal sig)
2014 {
2015 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
2016 struct regcache *regcache = get_current_regcache ();
2017 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2018 struct thread_info *tp = inferior_thread ();
2019 CORE_ADDR pc = regcache_read_pc (regcache);
2020 struct address_space *aspace = get_regcache_aspace (regcache);
2021 ptid_t resume_ptid;
2022 /* From here on, this represents the caller's step vs continue
2023 request, while STEP represents what we'll actually request the
2024 target to do. STEP can decay from a step to a continue, if e.g.,
2025 we need to implement single-stepping with breakpoints (software
2026 single-step). When deciding whether "set scheduler-locking step"
2027 applies, it's the caller's intention that counts. */
2028 const int entry_step = step;
2029
2030 QUIT;
2031
2032 if (current_inferior ()->waiting_for_vfork_done)
2033 {
2034 /* Don't try to single-step a vfork parent that is waiting for
2035 the child to get out of the shared memory region (by exec'ing
2036 or exiting). This is particularly important on software
2037 single-step archs, as the child process would trip on the
2038 software single step breakpoint inserted for the parent
2039 process. Since the parent will not actually execute any
2040 instruction until the child is out of the shared region (such
2041 are vfork's semantics), it is safe to simply continue it.
2042 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2043 the parent, and tell it to `keep_going', which automatically
2044 sets it stepping again. */
2045 if (debug_infrun)
2046 fprintf_unfiltered (gdb_stdlog,
2047 "infrun: resume : clear step\n");
2048 step = 0;
2049 }
2050
2051 if (debug_infrun)
2052 fprintf_unfiltered (gdb_stdlog,
2053 "infrun: resume (step=%d, signal=%s), "
2054 "trap_expected=%d, current thread [%s] at %s\n",
2055 step, gdb_signal_to_symbol_string (sig),
2056 tp->control.trap_expected,
2057 target_pid_to_str (inferior_ptid),
2058 paddress (gdbarch, pc));
2059
2060 /* Normally, by the time we reach `resume', the breakpoints are either
2061 removed or inserted, as appropriate. The exception is if we're sitting
2062 at a permanent breakpoint; we need to step over it, but permanent
2063 breakpoints can't be removed. So we have to test for it here. */
2064 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
2065 {
2066 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
2067 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2068 else
2069 error (_("\
2070 The program is stopped at a permanent breakpoint, but GDB does not know\n\
2071 how to step past a permanent breakpoint on this architecture. Try using\n\
2072 a command like `return' or `jump' to continue execution."));
2073 }
2074
2075 /* If we have a breakpoint to step over, make sure to do a single
2076 step only. Same if we have software watchpoints. */
2077 if (tp->control.trap_expected || bpstat_should_step ())
2078 tp->control.may_range_step = 0;
2079
2080 /* If enabled, step over breakpoints by executing a copy of the
2081 instruction at a different address.
2082
2083 We can't use displaced stepping when we have a signal to deliver;
2084 the comments for displaced_step_prepare explain why. The
2085 comments in the handle_inferior event for dealing with 'random
2086 signals' explain what we do instead.
2087
2088 We can't use displaced stepping while we are waiting for a vfork_done
2089 event; displaced stepping would break the vfork child in the same way
2090 a software single-step breakpoint would. */
2091 if (use_displaced_stepping (gdbarch)
2092 && (tp->control.trap_expected
2093 || (step && gdbarch_software_single_step_p (gdbarch)))
2094 && sig == GDB_SIGNAL_0
2095 && !current_inferior ()->waiting_for_vfork_done)
2096 {
2097 struct displaced_step_inferior_state *displaced;
2098
2099 if (!displaced_step_prepare (inferior_ptid))
2100 {
2101 /* Got placed in displaced stepping queue. Will be resumed
2102 later when all the currently queued displaced stepping
2103 requests finish. The thread is not executing at this
2104 point, and the call to set_executing will be made later.
2105 But we need to call set_running here, since from the
2106 user/frontend's point of view, threads were set running.
2107 Unless we're calling an inferior function, as in that
2108 case we pretend the inferior doesn't run at all. */
2109 if (!tp->control.in_infcall)
2110 set_running (user_visible_resume_ptid (entry_step), 1);
2111 discard_cleanups (old_cleanups);
2112 return;
2113 }
2114
2115 /* Update pc to reflect the new address from which we will execute
2116 instructions due to displaced stepping. */
2117 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
2118
2119 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
2120 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
2121 displaced->step_closure);
2122 }
2123
2124 /* Do we need to do it the hard way, w/temp breakpoints? */
2125 else if (step)
2126 step = maybe_software_singlestep (gdbarch, pc);
2127
2128 /* Currently, our software single-step implementation leads to different
2129 results than hardware single-stepping in one situation: when stepping
2130 into delivering a signal which has an associated signal handler,
2131 hardware single-step will stop at the first instruction of the handler,
2132 while software single-step will simply skip execution of the handler.
2133
2134 For now, this difference in behavior is accepted since there is no
2135 easy way to actually implement single-stepping into a signal handler
2136 without kernel support.
2137
2138 However, there is one scenario where this difference leads to follow-on
2139 problems: if we're stepping off a breakpoint by removing all breakpoints
2140 and then single-stepping. In this case, the software single-step
2141 behavior means that even if there is a *breakpoint* in the signal
2142 handler, GDB still would not stop.
2143
2144 Fortunately, we can at least fix this particular issue. We detect
2145 here the case where we are about to deliver a signal while software
2146 single-stepping with breakpoints removed. In this situation, we
2147 revert the decisions to remove all breakpoints and insert single-
2148 step breakpoints, and instead we install a step-resume breakpoint
2149 at the current address, deliver the signal without stepping, and
2150 once we arrive back at the step-resume breakpoint, actually step
2151 over the breakpoint we originally wanted to step over. */
2152 if (thread_has_single_step_breakpoints_set (tp)
2153 && sig != GDB_SIGNAL_0
2154 && step_over_info_valid_p ())
2155 {
2156 /* If we have nested signals or a pending signal is delivered
2157 immediately after a handler returns, we might already have
2158 a step-resume breakpoint set on the earlier handler. We cannot
2159 set another step-resume breakpoint; just continue on until the
2160 original breakpoint is hit. */
2161 if (tp->control.step_resume_breakpoint == NULL)
2162 {
2163 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2164 tp->step_after_step_resume_breakpoint = 1;
2165 }
2166
2167 delete_single_step_breakpoints (tp);
2168
2169 clear_step_over_info ();
2170 tp->control.trap_expected = 0;
2171
2172 insert_breakpoints ();
2173 }
2174
2175 /* If STEP is set, it's a request to use hardware stepping
2176 facilities. But in that case, we should never
2177 use single-step breakpoints. */
2178 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
2179
2180 /* Decide the set of threads to ask the target to resume. Start
2181 by assuming everything will be resumed, then narrow the set
2182 by applying increasingly restrictive conditions. */
2183 resume_ptid = user_visible_resume_ptid (entry_step);
2184
2185 /* Even if RESUME_PTID is a wildcard, and we end up resuming less
2186 (e.g., we might need to step over a breakpoint), from the
2187 user/frontend's point of view, all threads in RESUME_PTID are now
2188 running. Unless we're calling an inferior function, as in that
2189 case we pretend the inferior doesn't run at all. */
2190 if (!tp->control.in_infcall)
2191 set_running (resume_ptid, 1);
2192
2193 /* Maybe resume a single thread after all. */
2194 if ((step || thread_has_single_step_breakpoints_set (tp))
2195 && tp->control.trap_expected)
2196 {
2197 /* We're allowing a thread to run past a breakpoint it has
2198 hit, by single-stepping the thread with the breakpoint
2199 removed. In which case, we need to single-step only this
2200 thread, and keep others stopped, as they can miss this
2201 breakpoint if allowed to run. */
2202 resume_ptid = inferior_ptid;
2203 }
2204
2205 if (gdbarch_cannot_step_breakpoint (gdbarch))
2206 {
2207 /* Most targets can step a breakpoint instruction, thus
2208 executing it normally. But if this one cannot, just
2209 continue and we will hit it anyway. */
2210 if (step && breakpoint_inserted_here_p (aspace, pc))
2211 step = 0;
2212 }
2213
2214 if (debug_displaced
2215 && use_displaced_stepping (gdbarch)
2216 && tp->control.trap_expected)
2217 {
2218 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
2219 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
2220 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
2221 gdb_byte buf[4];
2222
2223 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
2224 paddress (resume_gdbarch, actual_pc));
2225 read_memory (actual_pc, buf, sizeof (buf));
2226 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
2227 }
2228
2229 if (tp->control.may_range_step)
2230 {
2231 /* If we're resuming a thread with the PC out of the step
2232 range, then we're doing some nested/finer run control
2233 operation, like stepping the thread out of the dynamic
2234 linker or the displaced stepping scratch pad. We
2235 shouldn't have allowed a range step then. */
2236 gdb_assert (pc_in_thread_step_range (pc, tp));
2237 }
2238
2239 /* Install inferior's terminal modes. */
2240 target_terminal_inferior ();
2241
2242 /* Avoid confusing the next resume, if the next stop/resume
2243 happens to apply to another thread. */
2244 tp->suspend.stop_signal = GDB_SIGNAL_0;
2245
2246 /* Advise target which signals may be handled silently. If we have
2247 removed breakpoints because we are stepping over one (in any
2248 thread), we need to receive all signals to avoid accidentally
2249 skipping a breakpoint during execution of a signal handler. */
2250 if (step_over_info_valid_p ())
2251 target_pass_signals (0, NULL);
2252 else
2253 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2254
2255 target_resume (resume_ptid, step, sig);
2256
2257 discard_cleanups (old_cleanups);
2258 }
2259 \f
2260 /* Proceeding. */
2261
2262 /* Clear out all variables saying what to do when inferior is continued.
2263 First do this, then set the ones you want, then call `proceed'. */
2264
2265 static void
2266 clear_proceed_status_thread (struct thread_info *tp)
2267 {
2268 if (debug_infrun)
2269 fprintf_unfiltered (gdb_stdlog,
2270 "infrun: clear_proceed_status_thread (%s)\n",
2271 target_pid_to_str (tp->ptid));
2272
2273 /* If this signal should not be seen by the program, give it zero.
2274 Used for debugging signals. */
2275 if (!signal_pass_state (tp->suspend.stop_signal))
2276 tp->suspend.stop_signal = GDB_SIGNAL_0;
2277
2278 tp->control.trap_expected = 0;
2279 tp->control.step_range_start = 0;
2280 tp->control.step_range_end = 0;
2281 tp->control.may_range_step = 0;
2282 tp->control.step_frame_id = null_frame_id;
2283 tp->control.step_stack_frame_id = null_frame_id;
2284 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2285 tp->stop_requested = 0;
2286
2287 tp->control.stop_step = 0;
2288
2289 tp->control.proceed_to_finish = 0;
2290
2291 tp->control.command_interp = NULL;
2292
2293 /* Discard any remaining commands or status from previous stop. */
2294 bpstat_clear (&tp->control.stop_bpstat);
2295 }
2296
2297 void
2298 clear_proceed_status (int step)
2299 {
2300 if (!non_stop)
2301 {
2302 struct thread_info *tp;
2303 ptid_t resume_ptid;
2304
2305 resume_ptid = user_visible_resume_ptid (step);
2306
2307 /* In all-stop mode, delete the per-thread status of all threads
2308 we're about to resume, implicitly and explicitly. */
2309 ALL_NON_EXITED_THREADS (tp)
2310 {
2311 if (!ptid_match (tp->ptid, resume_ptid))
2312 continue;
2313 clear_proceed_status_thread (tp);
2314 }
2315 }
2316
2317 if (!ptid_equal (inferior_ptid, null_ptid))
2318 {
2319 struct inferior *inferior;
2320
2321 if (non_stop)
2322 {
2323 /* If in non-stop mode, only delete the per-thread status of
2324 the current thread. */
2325 clear_proceed_status_thread (inferior_thread ());
2326 }
2327
2328 inferior = current_inferior ();
2329 inferior->control.stop_soon = NO_STOP_QUIETLY;
2330 }
2331
2332 stop_after_trap = 0;
2333
2334 clear_step_over_info ();
2335
2336 observer_notify_about_to_proceed ();
2337
2338 if (stop_registers)
2339 {
2340 regcache_xfree (stop_registers);
2341 stop_registers = NULL;
2342 }
2343 }
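
/* The calling protocol described above, in sketch form:

       clear_proceed_status (0);
       ... set any step_range / step_frame state the command needs ...
       proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);

   i.e. clear everything first, set only what this particular execution
   command needs, then call `proceed'.  Passing (CORE_ADDR) -1 resumes
   where the thread stopped, and GDB_SIGNAL_DEFAULT means act according
   to how it stopped.  */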
2344
2345 /* Returns true if TP is still stopped at a breakpoint that needs
2346 stepping-over in order to make progress. If the breakpoint is gone
2347 meanwhile, we can skip the whole step-over dance. */
2348
2349 static int
2350 thread_still_needs_step_over (struct thread_info *tp)
2351 {
2352 if (tp->stepping_over_breakpoint)
2353 {
2354 struct regcache *regcache = get_thread_regcache (tp->ptid);
2355
2356 if (breakpoint_here_p (get_regcache_aspace (regcache),
2357 regcache_read_pc (regcache)))
2358 return 1;
2359
2360 tp->stepping_over_breakpoint = 0;
2361 }
2362
2363 return 0;
2364 }
2365
2366 /* Returns true if scheduler locking applies. STEP indicates whether
2367 we're about to do a step/next-like command to a thread. */
2368
2369 static int
2370 schedlock_applies (int step)
2371 {
2372 return (scheduler_mode == schedlock_on
2373 || (scheduler_mode == schedlock_step
2374 && step));
2375 }
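
/* E.g., schedlock_applies (1) is true in both the "on" and "step"
   modes, while schedlock_applies (0) is true only in "on" mode.  */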
2376
2377 /* Look for a thread other than EXCEPT that has previously reported a
2378 breakpoint event, and thus needs a step-over in order to make
2379 progress. Returns NULL if none is found. STEP indicates whether
2380 we're about to step the current thread, in order to decide whether
2381 "set scheduler-locking step" applies. */
2382
2383 static struct thread_info *
2384 find_thread_needs_step_over (int step, struct thread_info *except)
2385 {
2386 struct thread_info *tp, *current;
2387
2388 /* With non-stop mode on, threads are always handled individually. */
2389 gdb_assert (! non_stop);
2390
2391 current = inferior_thread ();
2392
2393 /* If scheduler locking applies, we can avoid iterating over all
2394 threads. */
2395 if (schedlock_applies (step))
2396 {
2397 if (except != current
2398 && thread_still_needs_step_over (current))
2399 return current;
2400
2401 return NULL;
2402 }
2403
2404 ALL_NON_EXITED_THREADS (tp)
2405 {
2406 /* Ignore the EXCEPT thread. */
2407 if (tp == except)
2408 continue;
2409 /* Ignore threads of processes we're not resuming. */
2410 if (!sched_multi
2411 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2412 continue;
2413
2414 if (thread_still_needs_step_over (tp))
2415 return tp;
2416 }
2417
2418 return NULL;
2419 }
2420
2421 /* Basic routine for continuing the program in various fashions.
2422
2423 ADDR is the address to resume at, or -1 for resume where stopped.
2424 SIGGNAL is the signal to give it, or 0 for none,
2425 or -1 for act according to how it stopped.
2426 STEP is nonzero if should trap after one instruction.
2427 -1 means return after that and print nothing.
2428 You should probably set various step_... variables
2429 before calling here, if you are stepping.
2430
2431 You should call clear_proceed_status before calling proceed. */
2432
2433 void
2434 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2435 {
2436 struct regcache *regcache;
2437 struct gdbarch *gdbarch;
2438 struct thread_info *tp;
2439 CORE_ADDR pc;
2440 struct address_space *aspace;
2441
2442 /* If we're stopped at a fork/vfork, follow the branch set by the
2443 "set follow-fork-mode" command; otherwise, we'll just proceed
2444 resuming the current thread. */
2445 if (!follow_fork ())
2446 {
2447 /* The target for some reason decided not to resume. */
2448 normal_stop ();
2449 if (target_can_async_p ())
2450 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2451 return;
2452 }
2453
2454 /* We'll update this if & when we switch to a new thread. */
2455 previous_inferior_ptid = inferior_ptid;
2456
2457 regcache = get_current_regcache ();
2458 gdbarch = get_regcache_arch (regcache);
2459 aspace = get_regcache_aspace (regcache);
2460 pc = regcache_read_pc (regcache);
2461 tp = inferior_thread ();
2462
2463 if (step > 0)
2464 step_start_function = find_pc_function (pc);
2465 if (step < 0)
2466 stop_after_trap = 1;
2467
2468 /* Fill in with reasonable starting values. */
2469 init_thread_stepping_state (tp);
2470
2471 if (addr == (CORE_ADDR) -1)
2472 {
2473 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2474 && execution_direction != EXEC_REVERSE)
2475 /* There is a breakpoint at the address we will resume at,
2476 step one instruction before inserting breakpoints so that
2477 we do not stop right away (and report a second hit at this
2478 breakpoint).
2479
2480 Note, we don't do this in reverse, because we won't
2481 actually be executing the breakpoint insn anyway.
2482 We'll be (un-)executing the previous instruction. */
2483 tp->stepping_over_breakpoint = 1;
2484 else if (gdbarch_single_step_through_delay_p (gdbarch)
2485 && gdbarch_single_step_through_delay (gdbarch,
2486 get_current_frame ()))
2487 /* We stepped onto an instruction that needs to be stepped
2488 again before re-inserting the breakpoint, do so. */
2489 tp->stepping_over_breakpoint = 1;
2490 }
2491 else
2492 {
2493 regcache_write_pc (regcache, addr);
2494 }
2495
2496 if (siggnal != GDB_SIGNAL_DEFAULT)
2497 tp->suspend.stop_signal = siggnal;
2498
2499 /* Record the interpreter that issued the execution command that
2500 caused this thread to resume. If the top level interpreter is
2501 MI/async, and the execution command was a CLI command
2502 (next/step/etc.), we'll want to print stop event output to the MI
2503 console channel (the stepped-to line, etc.), as if the user
2504 entered the execution command on a real GDB console. */
2505 inferior_thread ()->control.command_interp = command_interp ();
2506
2507 if (debug_infrun)
2508 fprintf_unfiltered (gdb_stdlog,
2509 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2510 paddress (gdbarch, addr),
2511 gdb_signal_to_symbol_string (siggnal), step);
2512
2513 if (non_stop)
2514 /* In non-stop, each thread is handled individually. The context
2515 must already be set to the right thread here. */
2516 ;
2517 else
2518 {
2519 struct thread_info *step_over;
2520
2521 /* In a multi-threaded task we may select another thread and
2522 then continue or step.
2523
2524 But if the old thread was stopped at a breakpoint, it will
2525 immediately cause another breakpoint stop without any
2526 execution (i.e. it will report a breakpoint hit incorrectly).
2527 So we must step over it first.
2528
2529 Look for a thread other than the current (TP) that reported a
2530 breakpoint hit and hasn't been resumed yet since. */
2531 step_over = find_thread_needs_step_over (step, tp);
2532 if (step_over != NULL)
2533 {
2534 if (debug_infrun)
2535 fprintf_unfiltered (gdb_stdlog,
2536 "infrun: need to step-over [%s] first\n",
2537 target_pid_to_str (step_over->ptid));
2538
2539 /* Store the prev_pc for the stepping thread too, needed by
2540 switch_back_to_stepped_thread. */
2541 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2542 switch_to_thread (step_over->ptid);
2543 tp = step_over;
2544 }
2545 }
2546
2547 /* If we need to step over a breakpoint, and we're not using
2548 displaced stepping to do so, insert all breakpoints (watchpoints,
2549 etc.) but the one we're stepping over, step one instruction, and
2550 then re-insert the breakpoint when that step is finished. */
2551 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2552 {
2553 struct regcache *regcache = get_current_regcache ();
2554
2555 set_step_over_info (get_regcache_aspace (regcache),
2556 regcache_read_pc (regcache), 0);
2557 }
2558 else
2559 clear_step_over_info ();
2560
2561 insert_breakpoints ();
2562
2563 tp->control.trap_expected = tp->stepping_over_breakpoint;
2564
2565 annotate_starting ();
2566
2567 /* Make sure that output from GDB appears before output from the
2568 inferior. */
2569 gdb_flush (gdb_stdout);
2570
2571 /* Refresh prev_pc value just prior to resuming. This used to be
2572 done in stop_waiting, however, setting prev_pc there did not handle
2573 scenarios such as inferior function calls or returning from
2574 a function via the return command. In those cases, the prev_pc
2575 value was not set properly for subsequent commands. The prev_pc value
2576 is used to initialize the starting line number in the ecs. With an
2577 invalid value, the gdb next command ends up stopping at the position
2578 represented by the next line table entry past our start position.
2579 On platforms that generate one line table entry per line, this
2580 is not a problem. However, on the ia64, the compiler generates
2581 extraneous line table entries that do not increase the line number.
2582 When we issue the gdb next command on the ia64 after an inferior call
2583 or a return command, we often end up a few instructions forward, still
2584 within the original line we started in.
2585
2586 An attempt was made to refresh the prev_pc at the same time the
2587 execution_control_state is initialized (for instance, just before
2588 waiting for an inferior event). But this approach did not work
2589 because of platforms that use ptrace, where the pc register cannot
2590 be read unless the inferior is stopped. At that point, we are not
2591 guaranteed the inferior is stopped and so the regcache_read_pc() call
2592 can fail. Setting the prev_pc value here ensures the value is updated
2593 correctly when the inferior is stopped. */
2594 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2595
2596 /* Resume inferior. */
2597 resume (tp->control.trap_expected || step || bpstat_should_step (),
2598 tp->suspend.stop_signal);
2599
2600 /* Wait for it to stop (if not standalone)
2601 and in any case decode why it stopped, and act accordingly. */
2602 /* Do this only if we are not using the event loop, or if the target
2603 does not support asynchronous execution. */
2604 if (!target_can_async_p ())
2605 {
2606 wait_for_inferior ();
2607 normal_stop ();
2608 }
2609 }
2610 \f
2611
2612 /* Start remote-debugging of a machine over a serial link. */
2613
2614 void
2615 start_remote (int from_tty)
2616 {
2617 struct inferior *inferior;
2618
2619 inferior = current_inferior ();
2620 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2621
2622 /* Always go on waiting for the target, regardless of the mode. */
2623 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2624 indicate to wait_for_inferior that a target should timeout if
2625 nothing is returned (instead of just blocking). Because of this,
2626 targets expecting an immediate response need to, internally, set
2627 things up so that the target_wait() is forced to eventually
2628 timeout. */
2629 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2630 differentiate to its caller what the state of the target is after
2631 the initial open has been performed. Here we're assuming that
2632 the target has stopped. It should be possible to eventually have
2633 target_open() return to the caller an indication that the target
2634 is currently running and GDB state should be set to the same as
2635 for an async run. */
2636 wait_for_inferior ();
2637
2638 /* Now that the inferior has stopped, do any bookkeeping like
2639 loading shared libraries. We want to do this before normal_stop,
2640 so that the displayed frame is up to date. */
2641 post_create_inferior (&current_target, from_tty);
2642
2643 normal_stop ();
2644 }
2645
2646 /* Initialize static vars when a new inferior begins. */
2647
2648 void
2649 init_wait_for_inferior (void)
2650 {
2651 /* These are meaningless until the first time through wait_for_inferior. */
2652
2653 breakpoint_init_inferior (inf_starting);
2654
2655 clear_proceed_status (0);
2656
2657 target_last_wait_ptid = minus_one_ptid;
2658
2659 previous_inferior_ptid = inferior_ptid;
2660
2661 /* Discard any skipped inlined frames. */
2662 clear_inline_frame_state (minus_one_ptid);
2663 }
2664
2665 \f
2666 /* This enum encodes possible reasons for doing a target_wait, so that
2667 wfi can call target_wait in one place. (Ultimately the call will be
2668 moved out of the infinite loop entirely.) */
2669
2670 enum infwait_states
2671 {
2672 infwait_normal_state,
2673 infwait_step_watch_state,
2674 infwait_nonstep_watch_state
2675 };
2676
2677 /* Current inferior wait state. */
2678 static enum infwait_states infwait_state;
2679
2680 /* Data to be passed around while handling an event. This data is
2681 discarded between events. */
2682 struct execution_control_state
2683 {
2684 ptid_t ptid;
2685 /* The thread that got the event, if this was a thread event; NULL
2686 otherwise. */
2687 struct thread_info *event_thread;
2688
2689 struct target_waitstatus ws;
2690 int stop_func_filled_in;
2691 CORE_ADDR stop_func_start;
2692 CORE_ADDR stop_func_end;
2693 const char *stop_func_name;
2694 int wait_some_more;
2695
2696 /* True if the event thread hit the single-step breakpoint of
2697 another thread. Thus the event doesn't cause a stop, the thread
2698 needs to be single-stepped past the single-step breakpoint before
2699 we can switch back to the original stepping thread. */
2700 int hit_singlestep_breakpoint;
2701 };
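
/* The usage pattern shared by the wait loops below (wait_for_inferior,
   fetch_inferior_event, prepare_for_detach) is roughly:

       struct execution_control_state ecss;
       struct execution_control_state *ecs = &ecss;

       memset (ecs, 0, sizeof (*ecs));
       ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
       handle_inferior_event (ecs);
       if (!ecs->wait_some_more)
	 normal_stop ();

   with cleanups, debug traces and async details omitted.  */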
2702
2703 static void handle_inferior_event (struct execution_control_state *ecs);
2704
2705 static void handle_step_into_function (struct gdbarch *gdbarch,
2706 struct execution_control_state *ecs);
2707 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2708 struct execution_control_state *ecs);
2709 static void handle_signal_stop (struct execution_control_state *ecs);
2710 static void check_exception_resume (struct execution_control_state *,
2711 struct frame_info *);
2712
2713 static void end_stepping_range (struct execution_control_state *ecs);
2714 static void stop_waiting (struct execution_control_state *ecs);
2715 static void prepare_to_wait (struct execution_control_state *ecs);
2716 static void keep_going (struct execution_control_state *ecs);
2717 static void process_event_stop_test (struct execution_control_state *ecs);
2718 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2719
2720 /* Callback for iterate over threads. If the thread is stopped, but
2721 the user/frontend doesn't know about that yet, go through
2722 normal_stop, as if the thread had just stopped now. ARG points at
2723 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2724 ptid_is_pid(PTID) is true, applies to all threads of the process
2725 pointed at by PTID. Otherwise, applies only to the thread pointed
2726 to by PTID. */
2727
2728 static int
2729 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2730 {
2731 ptid_t ptid = * (ptid_t *) arg;
2732
2733 if ((ptid_equal (info->ptid, ptid)
2734 || ptid_equal (minus_one_ptid, ptid)
2735 || (ptid_is_pid (ptid)
2736 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2737 && is_running (info->ptid)
2738 && !is_executing (info->ptid))
2739 {
2740 struct cleanup *old_chain;
2741 struct execution_control_state ecss;
2742 struct execution_control_state *ecs = &ecss;
2743
2744 memset (ecs, 0, sizeof (*ecs));
2745
2746 old_chain = make_cleanup_restore_current_thread ();
2747
2748 overlay_cache_invalid = 1;
2749 /* Flush target cache before starting to handle each event.
2750 Target was running and cache could be stale. This is just a
2751 heuristic. Running threads may modify target memory, but we
2752 don't get any event. */
2753 target_dcache_invalidate ();
2754
2755 /* Go through handle_inferior_event/normal_stop, so we always
2756 have consistent output as if the stop event had been
2757 reported. */
2758 ecs->ptid = info->ptid;
2759 ecs->event_thread = find_thread_ptid (info->ptid);
2760 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2761 ecs->ws.value.sig = GDB_SIGNAL_0;
2762
2763 handle_inferior_event (ecs);
2764
2765 if (!ecs->wait_some_more)
2766 {
2767 struct thread_info *tp;
2768
2769 normal_stop ();
2770
2771 /* Finish off the continuations. */
2772 tp = inferior_thread ();
2773 do_all_intermediate_continuations_thread (tp, 1);
2774 do_all_continuations_thread (tp, 1);
2775 }
2776
2777 do_cleanups (old_chain);
2778 }
2779
2780 return 0;
2781 }
2782
2783 /* This function is attached as a "thread_stop_requested" observer.
2784 Cleanup local state that assumed the PTID was to be resumed, and
2785 report the stop to the frontend. */
2786
2787 static void
2788 infrun_thread_stop_requested (ptid_t ptid)
2789 {
2790 struct displaced_step_inferior_state *displaced;
2791
2792 /* PTID was requested to stop. Remove it from the displaced
2793 stepping queue, so we don't try to resume it automatically. */
2794
2795 for (displaced = displaced_step_inferior_states;
2796 displaced;
2797 displaced = displaced->next)
2798 {
2799 struct displaced_step_request *it, **prev_next_p;
2800
2801 it = displaced->step_request_queue;
2802 prev_next_p = &displaced->step_request_queue;
2803 while (it)
2804 {
2805 if (ptid_match (it->ptid, ptid))
2806 {
2807 *prev_next_p = it->next;
2808 it->next = NULL;
2809 xfree (it);
2810 }
2811 else
2812 {
2813 prev_next_p = &it->next;
2814 }
2815
2816 it = *prev_next_p;
2817 }
2818 }
2819
2820 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2821 }
2822
2823 static void
2824 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2825 {
2826 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2827 nullify_last_target_wait_ptid ();
2828 }
2829
2830 /* Delete the step resume, single-step and longjmp/exception resume
2831 breakpoints of TP. */
2832
2833 static void
2834 delete_thread_infrun_breakpoints (struct thread_info *tp)
2835 {
2836 delete_step_resume_breakpoint (tp);
2837 delete_exception_resume_breakpoint (tp);
2838 delete_single_step_breakpoints (tp);
2839 }
2840
2841 /* If the target still has execution, call FUNC for each thread that
2842 just stopped. In all-stop, that's all the non-exited threads; in
2843 non-stop, that's the current thread, only. */
2844
2845 typedef void (*for_each_just_stopped_thread_callback_func)
2846 (struct thread_info *tp);
2847
2848 static void
2849 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
2850 {
2851 if (!target_has_execution || ptid_equal (inferior_ptid, null_ptid))
2852 return;
2853
2854 if (non_stop)
2855 {
2856 /* If in non-stop mode, only the current thread stopped. */
2857 func (inferior_thread ());
2858 }
2859 else
2860 {
2861 struct thread_info *tp;
2862
2863 /* In all-stop mode, all threads have stopped. */
2864 ALL_NON_EXITED_THREADS (tp)
2865 {
2866 func (tp);
2867 }
2868 }
2869 }
2870
2871 /* Delete the step resume and longjmp/exception resume breakpoints of
2872 the threads that just stopped. */
2873
2874 static void
2875 delete_just_stopped_threads_infrun_breakpoints (void)
2876 {
2877 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
2878 }
2879
2880 /* Delete the single-step breakpoints of the threads that just
2881 stopped. */
2882
2883 static void
2884 delete_just_stopped_threads_single_step_breakpoints (void)
2885 {
2886 for_each_just_stopped_thread (delete_single_step_breakpoints);
2887 }
2888
2889 /* A cleanup wrapper. */
2890
2891 static void
2892 delete_just_stopped_threads_infrun_breakpoints_cleanup (void *arg)
2893 {
2894 delete_just_stopped_threads_infrun_breakpoints ();
2895 }
2896
2897 /* Pretty print the results of target_wait, for debugging purposes. */
2898
2899 static void
2900 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2901 const struct target_waitstatus *ws)
2902 {
2903 char *status_string = target_waitstatus_to_string (ws);
2904 struct ui_file *tmp_stream = mem_fileopen ();
2905 char *text;
2906
2907 /* The text is split over several lines because it was getting too long.
2908 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2909 output as a unit; we want only one timestamp printed if debug_timestamp
2910 is set. */
2911
2912 fprintf_unfiltered (tmp_stream,
2913 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
2914 if (ptid_get_pid (waiton_ptid) != -1)
2915 fprintf_unfiltered (tmp_stream,
2916 " [%s]", target_pid_to_str (waiton_ptid));
2917 fprintf_unfiltered (tmp_stream, ", status) =\n");
2918 fprintf_unfiltered (tmp_stream,
2919 "infrun: %d [%s],\n",
2920 ptid_get_pid (result_ptid),
2921 target_pid_to_str (result_ptid));
2922 fprintf_unfiltered (tmp_stream,
2923 "infrun: %s\n",
2924 status_string);
2925
2926 text = ui_file_xstrdup (tmp_stream, NULL);
2927
2928 /* This uses %s in part to handle %'s in the text, but also to avoid
2929 a gcc error: the format attribute requires a string literal. */
2930 fprintf_unfiltered (gdb_stdlog, "%s", text);
2931
2932 xfree (status_string);
2933 xfree (text);
2934 ui_file_delete (tmp_stream);
2935 }
2936
2937 /* Prepare and stabilize the inferior for detaching it. E.g.,
2938 detaching while a thread is displaced stepping is a recipe for
2939 crashing it, as nothing would readjust the PC out of the scratch
2940 pad. */
2941
2942 void
2943 prepare_for_detach (void)
2944 {
2945 struct inferior *inf = current_inferior ();
2946 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2947 struct cleanup *old_chain_1;
2948 struct displaced_step_inferior_state *displaced;
2949
2950 displaced = get_displaced_stepping_state (inf->pid);
2951
2952 /* Is any thread of this process displaced stepping? If not,
2953 there's nothing else to do. */
2954 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2955 return;
2956
2957 if (debug_infrun)
2958 fprintf_unfiltered (gdb_stdlog,
2959 "displaced-stepping in-process while detaching");
2960
2961 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2962 inf->detaching = 1;
2963
2964 while (!ptid_equal (displaced->step_ptid, null_ptid))
2965 {
2966 struct cleanup *old_chain_2;
2967 struct execution_control_state ecss;
2968 struct execution_control_state *ecs;
2969
2970 ecs = &ecss;
2971 memset (ecs, 0, sizeof (*ecs));
2972
2973 overlay_cache_invalid = 1;
2974 /* Flush target cache before starting to handle each event.
2975 Target was running and cache could be stale. This is just a
2976 heuristic. Running threads may modify target memory, but we
2977 don't get any event. */
2978 target_dcache_invalidate ();
2979
2980 if (deprecated_target_wait_hook)
2981 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2982 else
2983 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2984
2985 if (debug_infrun)
2986 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2987
2988 /* If an error happens while handling the event, propagate GDB's
2989 knowledge of the executing state to the frontend/user running
2990 state. */
2991 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2992 &minus_one_ptid);
2993
2994 /* Now figure out what to do with the result of the wait. */
2995 handle_inferior_event (ecs);
2996
2997 /* No error, don't finish the state yet. */
2998 discard_cleanups (old_chain_2);
2999
3000 /* Breakpoints and watchpoints are not installed on the target
3001 at this point, and signals are passed directly to the
3002 inferior, so this must mean the process is gone. */
3003 if (!ecs->wait_some_more)
3004 {
3005 discard_cleanups (old_chain_1);
3006 error (_("Program exited while detaching"));
3007 }
3008 }
3009
3010 discard_cleanups (old_chain_1);
3011 }
3012
3013 /* Wait for control to return from inferior to debugger.
3014
3015 If inferior gets a signal, we may decide to start it up again
3016 instead of returning. That is why there is a loop in this function.
3017 When this function actually returns it means the inferior
3018 should be left stopped and GDB should read more commands. */
3019
3020 void
3021 wait_for_inferior (void)
3022 {
3023 struct cleanup *old_cleanups;
3024
3025 if (debug_infrun)
3026 fprintf_unfiltered
3027 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
3028
3029 old_cleanups
3030 = make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup,
3031 NULL);
3032
3033 while (1)
3034 {
3035 struct execution_control_state ecss;
3036 struct execution_control_state *ecs = &ecss;
3037 struct cleanup *old_chain;
3038 ptid_t waiton_ptid = minus_one_ptid;
3039
3040 memset (ecs, 0, sizeof (*ecs));
3041
3042 overlay_cache_invalid = 1;
3043
3044 /* Flush target cache before starting to handle each event.
3045 Target was running and cache could be stale. This is just a
3046 heuristic. Running threads may modify target memory, but we
3047 don't get any event. */
3048 target_dcache_invalidate ();
3049
3050 if (deprecated_target_wait_hook)
3051 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
3052 else
3053 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
3054
3055 if (debug_infrun)
3056 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3057
3058 /* If an error happens while handling the event, propagate GDB's
3059 knowledge of the executing state to the frontend/user running
3060 state. */
3061 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3062
3063 /* Now figure out what to do with the result of the wait. */
3064 handle_inferior_event (ecs);
3065
3066 /* No error, don't finish the state yet. */
3067 discard_cleanups (old_chain);
3068
3069 if (!ecs->wait_some_more)
3070 break;
3071 }
3072
3073 do_cleanups (old_cleanups);
3074 }
3075
3076 /* Asynchronous version of wait_for_inferior. It is called by the
3077 event loop whenever a change of state is detected on the file
3078 descriptor corresponding to the target. It can be called more than
3079 once to complete a single execution command. In such cases we need
3080 to keep the state in a global variable ECSS. If it is the last time
3081 that this function is called for a single execution command, then
3082 report to the user that the inferior has stopped, and do the
3083 necessary cleanups. */
3084
3085 void
3086 fetch_inferior_event (void *client_data)
3087 {
3088 struct execution_control_state ecss;
3089 struct execution_control_state *ecs = &ecss;
3090 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
3091 struct cleanup *ts_old_chain;
3092 int was_sync = sync_execution;
3093 int cmd_done = 0;
3094 ptid_t waiton_ptid = minus_one_ptid;
3095
3096 memset (ecs, 0, sizeof (*ecs));
3097
3098 /* We're handling a live event, so make sure we're doing live
3099 debugging. If we're looking at traceframes while the target is
3100 running, we're going to need to get back to that mode after
3101 handling the event. */
3102 if (non_stop)
3103 {
3104 make_cleanup_restore_current_traceframe ();
3105 set_current_traceframe (-1);
3106 }
3107
3108 if (non_stop)
3109 /* In non-stop mode, the user/frontend should not notice a thread
3110 switch due to internal events. Make sure we reverse to the
3111 user selected thread and frame after handling the event and
3112 running any breakpoint commands. */
3113 make_cleanup_restore_current_thread ();
3114
3115 overlay_cache_invalid = 1;
3116 /* Flush target cache before starting to handle each event. Target
3117 was running and cache could be stale. This is just a heuristic.
3118 Running threads may modify target memory, but we don't get any
3119 event. */
3120 target_dcache_invalidate ();
3121
3122 make_cleanup_restore_integer (&execution_direction);
3123 execution_direction = target_execution_direction ();
3124
3125 if (deprecated_target_wait_hook)
3126 ecs->ptid =
3127 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
3128 else
3129 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
3130
3131 if (debug_infrun)
3132 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
3133
3134 /* If an error happens while handling the event, propagate GDB's
3135 knowledge of the executing state to the frontend/user running
3136 state. */
3137 if (!non_stop)
3138 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
3139 else
3140 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
3141
3142 /* This cleanup runs before the make_cleanup_restore_current_thread
3143 above, so it still applies to the thread which threw the exception. */
3144 make_bpstat_clear_actions_cleanup ();
3145
3146 make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup, NULL);
3147
3148 /* Now figure out what to do with the result of the wait. */
3149 handle_inferior_event (ecs);
3150
3151 if (!ecs->wait_some_more)
3152 {
3153 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3154
3155 delete_just_stopped_threads_infrun_breakpoints ();
3156
3157 /* We may not find an inferior if this was a process exit. */
3158 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
3159 normal_stop ();
3160
3161 if (target_has_execution
3162 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
3163 && ecs->ws.kind != TARGET_WAITKIND_EXITED
3164 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3165 && ecs->event_thread->step_multi
3166 && ecs->event_thread->control.stop_step)
3167 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
3168 else
3169 {
3170 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
3171 cmd_done = 1;
3172 }
3173 }
3174
3175 /* No error, don't finish the thread states yet. */
3176 discard_cleanups (ts_old_chain);
3177
3178 /* Revert thread and frame. */
3179 do_cleanups (old_chain);
3180
3181 /* If the inferior was in sync execution mode, and now isn't,
3182 restore the prompt (a synchronous execution command has finished,
3183 and we're ready for input). */
3184 if (interpreter_async && was_sync && !sync_execution)
3185 observer_notify_sync_execution_done ();
3186
3187 if (cmd_done
3188 && !was_sync
3189 && exec_done_display_p
3190 && (ptid_equal (inferior_ptid, null_ptid)
3191 || !is_running (inferior_ptid)))
3192 printf_unfiltered (_("completed.\n"));
3193 }
3194
3195 /* Record the frame and location we're currently stepping through. */
3196 void
3197 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3198 {
3199 struct thread_info *tp = inferior_thread ();
3200
3201 tp->control.step_frame_id = get_frame_id (frame);
3202 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
3203
3204 tp->current_symtab = sal.symtab;
3205 tp->current_line = sal.line;
3206 }
3207
3208 /* Clear context switchable stepping state. */
3209
3210 void
3211 init_thread_stepping_state (struct thread_info *tss)
3212 {
3213 tss->stepping_over_breakpoint = 0;
3214 tss->stepping_over_watchpoint = 0;
3215 tss->step_after_step_resume_breakpoint = 0;
3216 }
3217
3218 /* Set the cached copy of the last ptid/waitstatus. */
3219
3220 static void
3221 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
3222 {
3223 target_last_wait_ptid = ptid;
3224 target_last_waitstatus = status;
3225 }
3226
3227 /* Return the cached copy of the last pid/waitstatus returned by
3228 target_wait()/deprecated_target_wait_hook(). The data is actually
3229 cached by handle_inferior_event(), which gets called immediately
3230 after target_wait()/deprecated_target_wait_hook(). */
3231
3232 void
3233 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
3234 {
3235 *ptidp = target_last_wait_ptid;
3236 *status = target_last_waitstatus;
3237 }
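
/* E.g., a consumer interested in how the last stop was reported would
   do, in sketch form:

       ptid_t last_ptid;
       struct target_waitstatus last_status;

       get_last_target_status (&last_ptid, &last_status);
       if (last_status.kind == TARGET_WAITKIND_STOPPED)
	 ... LAST_PTID stopped with signal last_status.value.sig ...  */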
3238
3239 void
3240 nullify_last_target_wait_ptid (void)
3241 {
3242 target_last_wait_ptid = minus_one_ptid;
3243 }
3244
3245 /* Switch thread contexts. */
3246
3247 static void
3248 context_switch (ptid_t ptid)
3249 {
3250 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3251 {
3252 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3253 target_pid_to_str (inferior_ptid));
3254 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3255 target_pid_to_str (ptid));
3256 }
3257
3258 switch_to_thread (ptid);
3259 }
3260
3261 static void
3262 adjust_pc_after_break (struct execution_control_state *ecs)
3263 {
3264 struct regcache *regcache;
3265 struct gdbarch *gdbarch;
3266 struct address_space *aspace;
3267 CORE_ADDR breakpoint_pc, decr_pc;
3268
3269 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3270 we aren't, just return.
3271
3272 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3273 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3274 implemented by software breakpoints should be handled through the normal
3275 breakpoint layer.
3276
3277 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3278 different signals (SIGILL or SIGEMT for instance), but it is less
3279 clear where the PC is pointing afterwards. It may not match
3280 gdbarch_decr_pc_after_break. I don't know any specific target that
3281 generates these signals at breakpoints (the code has been in GDB since at
3282 least 1992) so I can not guess how to handle them here.
3283
3284 In earlier versions of GDB, a target with
3285 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3286 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3287 target with both of these set in GDB history, and it seems unlikely to be
3288 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3289
3290 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3291 return;
3292
3293 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3294 return;
3295
3296 /* In reverse execution, when a breakpoint is hit, the instruction
3297 under it has already been de-executed. The reported PC always
3298 points at the breakpoint address, so adjusting it further would
3299 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3300 architecture:
3301
3302 B1 0x08000000 : INSN1
3303 B2 0x08000001 : INSN2
3304 0x08000002 : INSN3
3305 PC -> 0x08000003 : INSN4
3306
3307 Say you're stopped at 0x08000003 as above. Reverse continuing
3308 from that point should hit B2 as below. Reading the PC when the
3309 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3310 been de-executed already.
3311
3312 B1 0x08000000 : INSN1
3313 B2 PC -> 0x08000001 : INSN2
3314 0x08000002 : INSN3
3315 0x08000003 : INSN4
3316
3317 We can't apply the same logic as for forward execution, because
3318 we would wrongly adjust the PC to 0x08000000, since there's a
3319 breakpoint at PC - 1. We'd then report a hit on B1, although
3320 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3321 behaviour. */
3322 if (execution_direction == EXEC_REVERSE)
3323 return;
3324
3325 /* If this target does not decrement the PC after breakpoints, then
3326 we have nothing to do. */
3327 regcache = get_thread_regcache (ecs->ptid);
3328 gdbarch = get_regcache_arch (regcache);
3329
3330 decr_pc = target_decr_pc_after_break (gdbarch);
3331 if (decr_pc == 0)
3332 return;
3333
3334 aspace = get_regcache_aspace (regcache);
3335
3336 /* Find the location where (if we've hit a breakpoint) the
3337 breakpoint would be. */
3338 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
3339
3340 /* Check whether there actually is a software breakpoint inserted at
3341 that location.
3342
3343 If in non-stop mode, a race condition is possible where we've
3344 removed a breakpoint, but stop events for that breakpoint were
3345 already queued and arrive later. To suppress those spurious
3346 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3347 and retire them after a number of stop events are reported. */
3348 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3349 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3350 {
3351 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3352
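/* If the process record target is in use, disable recording of
   GDB's own operations while we rewrite the PC, so the adjustment
   below is not recorded as if the inferior had executed it.  */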
3353 if (record_full_is_used ())
3354 record_full_gdb_operation_disable_set ();
3355
3356 /* When using hardware single-step, a SIGTRAP is reported for both
3357 a completed single-step and a software breakpoint. Need to
3358 differentiate between the two, as the latter needs adjusting
3359 but the former does not.
3360
3361 The SIGTRAP can be due to a completed hardware single-step only if
3362 - we didn't insert software single-step breakpoints
3363 - the thread to be examined is still the current thread
3364 - this thread is currently being stepped
3365
3366 If any of these conditions does not hold, we must have stopped due
3367 to hitting a software breakpoint, and have to back up to the
3368 breakpoint address.
3369
3370 As a special case, we could have hardware single-stepped a
3371 software breakpoint. In this case (prev_pc == breakpoint_pc),
3372 we also need to back up to the breakpoint address. */
3373
3374 if (thread_has_single_step_breakpoints_set (ecs->event_thread)
3375 || !ptid_equal (ecs->ptid, inferior_ptid)
3376 || !currently_stepping (ecs->event_thread)
3377 || ecs->event_thread->prev_pc == breakpoint_pc)
3378 regcache_write_pc (regcache, breakpoint_pc);
3379
3380 do_cleanups (old_cleanups);
3381 }
3382 }
3383
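/* Return non-zero if FRAME was stepped into from the frame whose id
   is STEP_FRAME_ID, walking out through any inline frames in
   between; return zero otherwise.  */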
3384 static int
3385 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3386 {
3387 for (frame = get_prev_frame (frame);
3388 frame != NULL;
3389 frame = get_prev_frame (frame))
3390 {
3391 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3392 return 1;
3393 if (get_frame_type (frame) != INLINE_FRAME)
3394 break;
3395 }
3396
3397 return 0;
3398 }
3399
3400 /* Auxiliary function that handles syscall entry/return events.
3401 It returns 1 if the inferior should keep going (and GDB
3402 should ignore the event), or 0 if the event deserves to be
3403 processed. */
3404
3405 static int
3406 handle_syscall_event (struct execution_control_state *ecs)
3407 {
3408 struct regcache *regcache;
3409 int syscall_number;
3410
3411 if (!ptid_equal (ecs->ptid, inferior_ptid))
3412 context_switch (ecs->ptid);
3413
3414 regcache = get_thread_regcache (ecs->ptid);
3415 syscall_number = ecs->ws.value.syscall_number;
3416 stop_pc = regcache_read_pc (regcache);
3417
3418 if (catch_syscall_enabled () > 0
3419 && catching_syscall_number (syscall_number) > 0)
3420 {
3421 if (debug_infrun)
3422 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3423 syscall_number);
3424
3425 ecs->event_thread->control.stop_bpstat
3426 = bpstat_stop_status (get_regcache_aspace (regcache),
3427 stop_pc, ecs->ptid, &ecs->ws);
3428
3429 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3430 {
3431 /* Catchpoint hit. */
3432 return 0;
3433 }
3434 }
3435
3436 /* If no catchpoint triggered for this, then keep going. */
3437 keep_going (ecs);
3438 return 1;
3439 }
3440
3441 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3442
3443 static void
3444 fill_in_stop_func (struct gdbarch *gdbarch,
3445 struct execution_control_state *ecs)
3446 {
3447 if (!ecs->stop_func_filled_in)
3448 {
3449 /* Don't care about return value; stop_func_start and stop_func_name
3450 will both be 0 if it doesn't work. */
3451 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3452 &ecs->stop_func_start, &ecs->stop_func_end);
3453 ecs->stop_func_start
3454 += gdbarch_deprecated_function_start_offset (gdbarch);
3455
3456 if (gdbarch_skip_entrypoint_p (gdbarch))
3457 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3458 ecs->stop_func_start);
3459
3460 ecs->stop_func_filled_in = 1;
3461 }
3462 }
3463
3464
3465 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3466
3467 static enum stop_kind
3468 get_inferior_stop_soon (ptid_t ptid)
3469 {
3470 struct inferior *inf = find_inferior_pid (ptid_get_pid (ptid));
3471
3472 gdb_assert (inf != NULL);
3473 return inf->control.stop_soon;
3474 }
3475
3476 /* Given an execution control state that has been freshly filled in by
3477 an event from the inferior, figure out what it means and take
3478 appropriate action.
3479
3480 The alternatives are:
3481
3482 1) stop_waiting and return; to really stop and return to the
3483 debugger.
3484
3485 2) keep_going and return; to wait for the next event (set
3486 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3487 once). */
3488
3489 static void
3490 handle_inferior_event (struct execution_control_state *ecs)
3491 {
3492 enum stop_kind stop_soon;
3493
3494 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3495 {
3496 /* We had an event in the inferior, but we are not interested in
3497 handling it at this level. The lower layers have already
3498 done what needs to be done, if anything.
3499
3500 One of the possible circumstances for this is when the
3501 inferior produces output for the console. The inferior has
3502 not stopped, and we are ignoring the event. Another possible
3503 circumstance is any event which the lower level knows will be
3504 reported multiple times without an intervening resume. */
3505 if (debug_infrun)
3506 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3507 prepare_to_wait (ecs);
3508 return;
3509 }
3510
3511 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3512 && target_can_async_p () && !sync_execution)
3513 {
3514 /* There were no unwaited-for children left in the target, but
3515 we're not synchronously waiting for events either. Just
3516 ignore. Otherwise, if we were running a synchronous
3517 execution command, we need to cancel it and give the user
3518 back the terminal. */
3519 if (debug_infrun)
3520 fprintf_unfiltered (gdb_stdlog,
3521 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3522 prepare_to_wait (ecs);
3523 return;
3524 }
3525
3526 /* Cache the last pid/waitstatus. */
3527 set_last_target_status (ecs->ptid, ecs->ws);
3528
3529 /* Always clear state belonging to the previous time we stopped. */
3530 stop_stack_dummy = STOP_NONE;
3531
3532 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3533 {
3534 /* No unwaited-for children left. IOW, all resumed children
3535 have exited. */
3536 if (debug_infrun)
3537 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3538
3539 stop_print_frame = 0;
3540 stop_waiting (ecs);
3541 return;
3542 }
3543
3544 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3545 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3546 {
3547 ecs->event_thread = find_thread_ptid (ecs->ptid);
3548 /* If it's a new thread, add it to the thread database. */
3549 if (ecs->event_thread == NULL)
3550 ecs->event_thread = add_thread (ecs->ptid);
3551
3552 /* Disable range stepping. If the next step request could use a
3553 range, this will end up re-enabled then. */
3554 ecs->event_thread->control.may_range_step = 0;
3555 }
3556
3557 /* Dependent on valid ECS->EVENT_THREAD. */
3558 adjust_pc_after_break (ecs);
3559
3560 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3561 reinit_frame_cache ();
3562
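/* Let the breakpoints module retire moribund breakpoint locations
   (removed breakpoints kept around to explain late stop events) now
   that one more event has been processed.  */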
3563 breakpoint_retire_moribund ();
3564
3565 /* First, distinguish signals caused by the debugger from signals
3566 that have to do with the program's own actions. Note that
3567 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3568 on the operating system version. Here we detect when a SIGILL or
3569 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3570 something similar for SIGSEGV, since a SIGSEGV will be generated
3571 when we're trying to execute a breakpoint instruction on a
3572 non-executable stack. This happens for call dummy breakpoints
3573 for architectures like SPARC that place call dummies on the
3574 stack. */
3575 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3576 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3577 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3578 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3579 {
3580 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3581
3582 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3583 regcache_read_pc (regcache)))
3584 {
3585 if (debug_infrun)
3586 fprintf_unfiltered (gdb_stdlog,
3587 "infrun: Treating signal as SIGTRAP\n");
3588 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3589 }
3590 }
3591
3592 /* Mark the non-executing threads accordingly. In all-stop, all
3593 threads of all processes are stopped when we get any event
3594 reported. In non-stop mode, only the event thread stops. If
3595 we're handling a process exit in non-stop mode, there's nothing
3596 to do, as threads of the dead process are gone, and threads of
3597 any other process were left running. */
3598 if (!non_stop)
3599 set_executing (minus_one_ptid, 0);
3600 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3601 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3602 set_executing (ecs->ptid, 0);
3603
3604 switch (ecs->ws.kind)
3605 {
3606 case TARGET_WAITKIND_LOADED:
3607 if (debug_infrun)
3608 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3609 if (!ptid_equal (ecs->ptid, inferior_ptid))
3610 context_switch (ecs->ptid);
3611 /* Ignore gracefully during startup of the inferior, as it might
3612 be the shell which has just loaded some objects, otherwise
3613 add the symbols for the newly loaded objects. Also ignore at
3614 the beginning of an attach or remote session; we will query
3615 the full list of libraries once the connection is
3616 established. */
3617
3618 stop_soon = get_inferior_stop_soon (ecs->ptid);
3619 if (stop_soon == NO_STOP_QUIETLY)
3620 {
3621 struct regcache *regcache;
3622
3623 regcache = get_thread_regcache (ecs->ptid);
3624
3625 handle_solib_event ();
3626
3627 ecs->event_thread->control.stop_bpstat
3628 = bpstat_stop_status (get_regcache_aspace (regcache),
3629 stop_pc, ecs->ptid, &ecs->ws);
3630
3631 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3632 {
3633 /* A catchpoint triggered. */
3634 process_event_stop_test (ecs);
3635 return;
3636 }
3637
3638 /* If requested, stop when the dynamic linker notifies
3639 gdb of events. This allows the user to get control
3640 and place breakpoints in initializer routines for
3641 dynamically loaded objects (among other things). */
3642 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3643 if (stop_on_solib_events)
3644 {
3645 /* Make sure we print "Stopped due to solib-event" in
3646 normal_stop. */
3647 stop_print_frame = 1;
3648
3649 stop_waiting (ecs);
3650 return;
3651 }
3652 }
3653
3654 /* If we are skipping through a shell, or through shared library
3655 loading that we aren't interested in, resume the program. If
3656 we're running the program normally, also resume. */
3657 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3658 {
3659 /* Loading of shared libraries might have changed breakpoint
3660 addresses. Make sure new breakpoints are inserted. */
3661 if (stop_soon == NO_STOP_QUIETLY)
3662 insert_breakpoints ();
3663 resume (0, GDB_SIGNAL_0);
3664 prepare_to_wait (ecs);
3665 return;
3666 }
3667
3668 /* But stop if we're attaching or setting up a remote
3669 connection. */
3670 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3671 || stop_soon == STOP_QUIETLY_REMOTE)
3672 {
3673 if (debug_infrun)
3674 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3675 stop_waiting (ecs);
3676 return;
3677 }
3678
3679 internal_error (__FILE__, __LINE__,
3680 _("unhandled stop_soon: %d"), (int) stop_soon);
3681
3682 case TARGET_WAITKIND_SPURIOUS:
3683 if (debug_infrun)
3684 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3685 if (!ptid_equal (ecs->ptid, inferior_ptid))
3686 context_switch (ecs->ptid);
3687 resume (0, GDB_SIGNAL_0);
3688 prepare_to_wait (ecs);
3689 return;
3690
3691 case TARGET_WAITKIND_EXITED:
3692 case TARGET_WAITKIND_SIGNALLED:
3693 if (debug_infrun)
3694 {
3695 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3696 fprintf_unfiltered (gdb_stdlog,
3697 "infrun: TARGET_WAITKIND_EXITED\n");
3698 else
3699 fprintf_unfiltered (gdb_stdlog,
3700 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3701 }
3702
3703 inferior_ptid = ecs->ptid;
3704 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3705 set_current_program_space (current_inferior ()->pspace);
3706 handle_vfork_child_exec_or_exit (0);
3707 target_terminal_ours (); /* Must do this before mourn anyway. */
3708
3709 /* Clear any previous state of convenience variables. */
3710 clear_exit_convenience_vars ();
3711
3712 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3713 {
3714 /* Record the exit code in the convenience variable $_exitcode, so
3715 that the user can inspect this again later. */
3716 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3717 (LONGEST) ecs->ws.value.integer);
3718
3719 /* Also record this in the inferior itself. */
3720 current_inferior ()->has_exit_code = 1;
3721 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3722
3723 /* Support the --return-child-result option. */
3724 return_child_result_value = ecs->ws.value.integer;
3725
3726 observer_notify_exited (ecs->ws.value.integer);
3727 }
3728 else
3729 {
3730 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3731 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3732
3733 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3734 {
3735 /* Set the value of the internal variable $_exitsignal,
3736 which holds the signal uncaught by the inferior. */
3737 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3738 gdbarch_gdb_signal_to_target (gdbarch,
3739 ecs->ws.value.sig));
3740 }
3741 else
3742 {
3743 /* We don't have access to the target's method used for
3744 converting between signal numbers (GDB's internal
3745 representation <-> target's representation).
3746 Therefore, we cannot do a good job at displaying this
3747 information to the user. It's better to just warn
3748 her about it (if infrun debugging is enabled), and
3749 give up. */
3750 if (debug_infrun)
3751 fprintf_filtered (gdb_stdlog, _("\
3752 Cannot fill $_exitsignal with the correct signal number.\n"));
3753 }
3754
3755 observer_notify_signal_exited (ecs->ws.value.sig);
3756 }
3757
3758 gdb_flush (gdb_stdout);
3759 target_mourn_inferior ();
3760 stop_print_frame = 0;
3761 stop_waiting (ecs);
3762 return;
3763
3764 /* The following are the only cases in which we keep going;
3765 the above cases end in a continue or goto. */
3766 case TARGET_WAITKIND_FORKED:
3767 case TARGET_WAITKIND_VFORKED:
3768 if (debug_infrun)
3769 {
3770 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3771 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3772 else
3773 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3774 }
3775
3776 /* Check whether the inferior is displaced stepping. */
3777 {
3778 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3779 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3780 struct displaced_step_inferior_state *displaced
3781 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3782
3783 /* If displaced stepping is in use, and thread ecs->ptid is the
3784 thread that is displaced stepping. */
3785 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3786 {
3787 struct inferior *parent_inf
3788 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3789 struct regcache *child_regcache;
3790 CORE_ADDR parent_pc;
3791
3792 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3793 indicating that the displaced stepping of syscall instruction
3794 has been done. Perform cleanup for parent process here. Note
3795 that this operation also cleans up the child process for vfork,
3796 because their pages are shared. */
3797 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3798
3799 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3800 {
3801 /* Restore scratch pad for child process. */
3802 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3803 }
3804
3805 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3806 the child's PC is also within the scratchpad. Set the child's PC
3807 to the parent's PC value, which has already been fixed up.
3808 FIXME: we use the parent's aspace here, although we're touching
3809 the child, because the child hasn't been added to the inferior
3810 list yet at this point. */
3811
3812 child_regcache
3813 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3814 gdbarch,
3815 parent_inf->aspace);
3816 /* Read PC value of parent process. */
3817 parent_pc = regcache_read_pc (regcache);
3818
3819 if (debug_displaced)
3820 fprintf_unfiltered (gdb_stdlog,
3821 "displaced: write child pc from %s to %s\n",
3822 paddress (gdbarch,
3823 regcache_read_pc (child_regcache)),
3824 paddress (gdbarch, parent_pc));
3825
3826 regcache_write_pc (child_regcache, parent_pc);
3827 }
3828 }
3829
3830 if (!ptid_equal (ecs->ptid, inferior_ptid))
3831 context_switch (ecs->ptid);
3832
3833 /* Immediately detach breakpoints from the child before there's
3834 any chance of letting the user delete breakpoints from the
3835 breakpoint lists. If we don't do this early, it's easy to
3836 leave left over traps in the child, viz: "break foo; catch
3837 fork; c; <fork>; del; c; <child calls foo>". We only follow
3838 the fork on the last `continue', and by that time the
3839 breakpoint at "foo" is long gone from the breakpoint table.
3840 If we vforked, then we don't need to unpatch here, since both
3841 parent and child are sharing the same memory pages; we'll
3842 need to unpatch at follow/detach time instead to be certain
3843 that new breakpoints added between catchpoint hit time and
3844 vfork follow are detached. */
3845 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3846 {
3847 /* This won't actually modify the breakpoint list, but will
3848 physically remove the breakpoints from the child. */
3849 detach_breakpoints (ecs->ws.value.related_pid);
3850 }
3851
3852 delete_just_stopped_threads_single_step_breakpoints ();
3853
3854 /* In case the event is caught by a catchpoint, remember that
3855 the event is to be followed at the next resume of the thread,
3856 and not immediately. */
3857 ecs->event_thread->pending_follow = ecs->ws;
3858
3859 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3860
3861 ecs->event_thread->control.stop_bpstat
3862 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3863 stop_pc, ecs->ptid, &ecs->ws);
3864
3865 /* If no catchpoint triggered for this, then keep going. Note
3866 that we're interested in knowing the bpstat actually causes a
3867 stop, not just if it may explain the signal. Software
3868 watchpoints, for example, always appear in the bpstat. */
3869 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3870 {
3871 ptid_t parent;
3872 ptid_t child;
3873 int should_resume;
3874 int follow_child
3875 = (follow_fork_mode_string == follow_fork_mode_child);
3876
3877 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3878
3879 should_resume = follow_fork ();
3880
3881 parent = ecs->ptid;
3882 child = ecs->ws.value.related_pid;
3883
3884 /* In non-stop mode, also resume the other branch. */
3885 if (non_stop && !detach_fork)
3886 {
3887 if (follow_child)
3888 switch_to_thread (parent);
3889 else
3890 switch_to_thread (child);
3891
3892 ecs->event_thread = inferior_thread ();
3893 ecs->ptid = inferior_ptid;
3894 keep_going (ecs);
3895 }
3896
3897 if (follow_child)
3898 switch_to_thread (child);
3899 else
3900 switch_to_thread (parent);
3901
3902 ecs->event_thread = inferior_thread ();
3903 ecs->ptid = inferior_ptid;
3904
3905 if (should_resume)
3906 keep_going (ecs);
3907 else
3908 stop_waiting (ecs);
3909 return;
3910 }
3911 process_event_stop_test (ecs);
3912 return;
3913
3914 case TARGET_WAITKIND_VFORK_DONE:
3915 /* Done with the shared memory region. Re-insert breakpoints in
3916 the parent, and keep going. */
3917
3918 if (debug_infrun)
3919 fprintf_unfiltered (gdb_stdlog,
3920 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3921
3922 if (!ptid_equal (ecs->ptid, inferior_ptid))
3923 context_switch (ecs->ptid);
3924
3925 current_inferior ()->waiting_for_vfork_done = 0;
3926 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3927 /* This also takes care of reinserting breakpoints in the
3928 previously locked inferior. */
3929 keep_going (ecs);
3930 return;
3931
3932 case TARGET_WAITKIND_EXECD:
3933 if (debug_infrun)
3934 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3935
3936 if (!ptid_equal (ecs->ptid, inferior_ptid))
3937 context_switch (ecs->ptid);
3938
3939 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3940
3941 /* Do whatever is necessary to the parent branch of the vfork. */
3942 handle_vfork_child_exec_or_exit (1);
3943
3944 /* This causes the eventpoints and symbol table to be reset.
3945 Must do this now, before trying to determine whether to
3946 stop. */
3947 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3948
3949 ecs->event_thread->control.stop_bpstat
3950 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3951 stop_pc, ecs->ptid, &ecs->ws);
3952
3953 /* Note that this may be referenced from inside
3954 bpstat_stop_status above, through inferior_has_execd. */
3955 xfree (ecs->ws.value.execd_pathname);
3956 ecs->ws.value.execd_pathname = NULL;
3957
3958 /* If no catchpoint triggered for this, then keep going. */
3959 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3960 {
3961 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3962 keep_going (ecs);
3963 return;
3964 }
3965 process_event_stop_test (ecs);
3966 return;
3967
3968 /* Be careful not to try to gather much state about a thread
3969 that's in a syscall. It's frequently a losing proposition. */
3970 case TARGET_WAITKIND_SYSCALL_ENTRY:
3971 if (debug_infrun)
3972 fprintf_unfiltered (gdb_stdlog,
3973 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3974 /* Get the current syscall number. */
3975 if (handle_syscall_event (ecs) == 0)
3976 process_event_stop_test (ecs);
3977 return;
3978
3979 /* Before examining the threads further, step this thread to
3980 get it entirely out of the syscall. (We get notice of the
3981 event when the thread is just on the verge of exiting a
3982 syscall. Stepping one instruction seems to get it back
3983 into user code.) */
3984 case TARGET_WAITKIND_SYSCALL_RETURN:
3985 if (debug_infrun)
3986 fprintf_unfiltered (gdb_stdlog,
3987 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3988 if (handle_syscall_event (ecs) == 0)
3989 process_event_stop_test (ecs);
3990 return;
3991
3992 case TARGET_WAITKIND_STOPPED:
3993 if (debug_infrun)
3994 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3995 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3996 handle_signal_stop (ecs);
3997 return;
3998
3999 case TARGET_WAITKIND_NO_HISTORY:
4000 if (debug_infrun)
4001 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
4002 /* Reverse execution: target ran out of history info. */
4003
4004 delete_just_stopped_threads_single_step_breakpoints ();
4005 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4006 observer_notify_no_history ();
4007 stop_waiting (ecs);
4008 return;
4009 }
4010 }
4011
4012 /* Come here when the program has stopped with a signal. */
4013
4014 static void
4015 handle_signal_stop (struct execution_control_state *ecs)
4016 {
4017 struct frame_info *frame;
4018 struct gdbarch *gdbarch;
4019 int stopped_by_watchpoint;
4020 enum stop_kind stop_soon;
4021 int random_signal;
4022
4023 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
4024
4025 /* Do we need to clean up the state of a thread that has
4026 completed a displaced single-step? (Doing so usually affects
4027 the PC, so do it here, before we set stop_pc.) */
4028 displaced_step_fixup (ecs->ptid,
4029 ecs->event_thread->suspend.stop_signal);
4030
4031 /* If we either finished a single-step or hit a breakpoint, but
4032 the user wanted this thread to be stopped, pretend we got a
4033 SIG0 (generic unsignaled stop). */
4034 if (ecs->event_thread->stop_requested
4035 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4036 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4037
4038 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4039
4040 if (debug_infrun)
4041 {
4042 struct regcache *regcache = get_thread_regcache (ecs->ptid);
4043 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4044 struct cleanup *old_chain = save_inferior_ptid ();
4045
4046 inferior_ptid = ecs->ptid;
4047
4048 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
4049 paddress (gdbarch, stop_pc));
4050 if (target_stopped_by_watchpoint ())
4051 {
4052 CORE_ADDR addr;
4053
4054 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
4055
4056 if (target_stopped_data_address (&current_target, &addr))
4057 fprintf_unfiltered (gdb_stdlog,
4058 "infrun: stopped data address = %s\n",
4059 paddress (gdbarch, addr));
4060 else
4061 fprintf_unfiltered (gdb_stdlog,
4062 "infrun: (no data address available)\n");
4063 }
4064
4065 do_cleanups (old_chain);
4066 }
4067
4068 /* This originates from start_remote(), start_inferior() and
4069 shared library hook functions. */
4070 stop_soon = get_inferior_stop_soon (ecs->ptid);
4071 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4072 {
4073 if (!ptid_equal (ecs->ptid, inferior_ptid))
4074 context_switch (ecs->ptid);
4075 if (debug_infrun)
4076 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4077 stop_print_frame = 1;
4078 stop_waiting (ecs);
4079 return;
4080 }
4081
4082 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4083 && stop_after_trap)
4084 {
4085 if (!ptid_equal (ecs->ptid, inferior_ptid))
4086 context_switch (ecs->ptid);
4087 if (debug_infrun)
4088 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4089 stop_print_frame = 0;
4090 stop_waiting (ecs);
4091 return;
4092 }
4093
4094 /* This originates from attach_command(). We need to overwrite
4095 the stop_signal here, because some kernels don't ignore a
4096 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4097 See more comments in inferior.h. On the other hand, if we
4098 get a non-SIGSTOP, report it to the user - assume the backend
4099 will handle the SIGSTOP if it should show up later.
4100
4101 Also consider that the attach is complete when we see a
4102 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4103 target extended-remote report it instead of a SIGSTOP
4104 (e.g. gdbserver). We already rely on SIGTRAP being our
4105 signal, so this is no exception.
4106
4107 Also consider that the attach is complete when we see a
4108 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4109 the target to stop all threads of the inferior, in case the
4110 low level attach operation doesn't stop them implicitly. If
4111 they weren't stopped implicitly, then the stub will report a
4112 GDB_SIGNAL_0, meaning: stopped for no particular reason
4113 other than GDB's request. */
4114 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4115 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
4116 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4117 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
4118 {
4119 stop_print_frame = 1;
4120 stop_waiting (ecs);
4121 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4122 return;
4123 }
4124
4125 /* See if something interesting happened to the non-current thread. If
4126 so, then switch to that thread. */
4127 if (!ptid_equal (ecs->ptid, inferior_ptid))
4128 {
4129 if (debug_infrun)
4130 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
4131
4132 context_switch (ecs->ptid);
4133
4134 if (deprecated_context_hook)
4135 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4136 }
4137
4138 /* At this point, get hold of the now-current thread's frame. */
4139 frame = get_current_frame ();
4140 gdbarch = get_frame_arch (frame);
4141
4142 /* Pull the single step breakpoints out of the target. */
4143 if (gdbarch_software_single_step_p (gdbarch))
4144 {
4145 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4146 {
4147 struct regcache *regcache;
4148 struct address_space *aspace;
4149 CORE_ADDR pc;
4150
4151 regcache = get_thread_regcache (ecs->ptid);
4152 aspace = get_regcache_aspace (regcache);
4153 pc = regcache_read_pc (regcache);
4154
4155 /* However, before doing so, if this single-step breakpoint was
4156 actually for another thread, set this thread up for moving
4157 past it. */
4158 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
4159 aspace, pc))
4160 {
4161 if (single_step_breakpoint_inserted_here_p (aspace, pc))
4162 {
4163 if (debug_infrun)
4164 {
4165 fprintf_unfiltered (gdb_stdlog,
4166 "infrun: [%s] hit another thread's "
4167 "single-step breakpoint\n",
4168 target_pid_to_str (ecs->ptid));
4169 }
4170 ecs->hit_singlestep_breakpoint = 1;
4171 }
4172 }
4173 else
4174 {
4175 if (debug_infrun)
4176 {
4177 fprintf_unfiltered (gdb_stdlog,
4178 "infrun: [%s] hit its "
4179 "single-step breakpoint\n",
4180 target_pid_to_str (ecs->ptid));
4181 }
4182 }
4183 }
4184
4185 delete_just_stopped_threads_single_step_breakpoints ();
4186 }
4187
4188 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4189 && ecs->event_thread->control.trap_expected
4190 && ecs->event_thread->stepping_over_watchpoint)
4191 stopped_by_watchpoint = 0;
4192 else
4193 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4194
4195 /* If necessary, step over this watchpoint. We'll be back to display
4196 it in a moment. */
4197 if (stopped_by_watchpoint
4198 && (target_have_steppable_watchpoint
4199 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4200 {
4201 /* At this point, we are stopped at an instruction which has
4202 attempted to write to a piece of memory under control of
4203 a watchpoint. The instruction hasn't actually executed
4204 yet. If we were to evaluate the watchpoint expression
4205 now, we would get the old value, and therefore no change
4206 would seem to have occurred.
4207
4208 In order to make watchpoints work `right', we really need
4209 to complete the memory write, and then evaluate the
4210 watchpoint expression. We do this by single-stepping the
4211 target.
4212
4213 It may not be necessary to disable the watchpoint to step over
4214 it. For example, the PA can (with some kernel cooperation)
4215 single step over a watchpoint without disabling the watchpoint.
4216
4217 It is far more common to need to disable a watchpoint to step
4218 the inferior over it. If we have non-steppable watchpoints,
4219 we must disable the current watchpoint; it's simplest to
4220 disable all watchpoints.
4221
4222 Any breakpoint at PC must also be stepped over -- if there's
4223 one, it will have already triggered before the watchpoint
4224 triggered, and we either already reported it to the user, or
4225 it didn't cause a stop and we called keep_going. In either
4226 case, if there was a breakpoint at PC, we must be trying to
4227 step past it. */
4228 ecs->event_thread->stepping_over_watchpoint = 1;
4229 keep_going (ecs);
4230 return;
4231 }
4232
4233 ecs->event_thread->stepping_over_breakpoint = 0;
4234 ecs->event_thread->stepping_over_watchpoint = 0;
4235 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4236 ecs->event_thread->control.stop_step = 0;
4237 stop_print_frame = 1;
4238 stopped_by_random_signal = 0;
4239
4240 /* Hide inlined functions starting here, unless we just performed stepi or
4241 nexti. After stepi and nexti, always show the innermost frame (not any
4242 inline function call sites). */
4243 if (ecs->event_thread->control.step_range_end != 1)
4244 {
4245 struct address_space *aspace =
4246 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4247
4248 /* skip_inline_frames is expensive, so we avoid it if we can
4249 determine that the address is one where functions cannot have
4250 been inlined. This improves performance with inferiors that
4251 load a lot of shared libraries, because the solib event
4252 breakpoint is defined as the address of a function (i.e. not
4253 inline). Note that we have to check the previous PC as well
4254 as the current one to catch cases when we have just
4255 single-stepped off a breakpoint prior to reinstating it.
4256 Note that we're assuming that the code we single-step to is
4257 not inline, but that's not definitive: there's nothing
4258 preventing the event breakpoint function from containing
4259 inlined code, and the single-step ending up there. If the
4260 user had set a breakpoint on that inlined code, the missing
4261 skip_inline_frames call would break things. Fortunately
4262 that's an extremely unlikely scenario. */
4263 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4264 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4265 && ecs->event_thread->control.trap_expected
4266 && pc_at_non_inline_function (aspace,
4267 ecs->event_thread->prev_pc,
4268 &ecs->ws)))
4269 {
4270 skip_inline_frames (ecs->ptid);
4271
4272 /* Re-fetch current thread's frame in case that invalidated
4273 the frame cache. */
4274 frame = get_current_frame ();
4275 gdbarch = get_frame_arch (frame);
4276 }
4277 }
4278
4279 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4280 && ecs->event_thread->control.trap_expected
4281 && gdbarch_single_step_through_delay_p (gdbarch)
4282 && currently_stepping (ecs->event_thread))
4283 {
4284 /* We're trying to step off a breakpoint. Turns out that we're
4285 also on an instruction that needs to be stepped multiple
4286 times before it has been fully executed. E.g., architectures
4287 with a delay slot. It needs to be stepped twice, once for
4288 the instruction and once for the delay slot. */
4289 int step_through_delay
4290 = gdbarch_single_step_through_delay (gdbarch, frame);
4291
4292 if (debug_infrun && step_through_delay)
4293 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4294 if (ecs->event_thread->control.step_range_end == 0
4295 && step_through_delay)
4296 {
4297 /* The user issued a continue when stopped at a breakpoint.
4298 Set up for another trap and get out of here. */
4299 ecs->event_thread->stepping_over_breakpoint = 1;
4300 keep_going (ecs);
4301 return;
4302 }
4303 else if (step_through_delay)
4304 {
4305 /* The user issued a step when stopped at a breakpoint.
4306 Maybe we should stop, maybe we should not - the delay
4307 slot *might* correspond to a line of source. In any
4308 case, don't decide that here, just set
4309 ecs->stepping_over_breakpoint, making sure we
4310 single-step again before breakpoints are re-inserted. */
4311 ecs->event_thread->stepping_over_breakpoint = 1;
4312 }
4313 }
4314
4315 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4316 handles this event. */
4317 ecs->event_thread->control.stop_bpstat
4318 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4319 stop_pc, ecs->ptid, &ecs->ws);
4320
4321 /* The following is needed in case a breakpoint condition
4322 called a function. */
4323 stop_print_frame = 1;
4324
4325 /* This is where we handle "moribund" watchpoints. Unlike
4326 software breakpoints traps, hardware watchpoint traps are
4327 always distinguishable from random traps. If no high-level
4328 watchpoint is associated with the reported stop data address
4329 anymore, then the bpstat does not explain the signal ---
4330 simply make sure to ignore it if `stopped_by_watchpoint' is
4331 set. */
4332
4333 if (debug_infrun
4334 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4335 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4336 GDB_SIGNAL_TRAP)
4337 && stopped_by_watchpoint)
4338 fprintf_unfiltered (gdb_stdlog,
4339 "infrun: no user watchpoint explains "
4340 "watchpoint SIGTRAP, ignoring\n");
4341
4342 /* NOTE: cagney/2003-03-29: These checks for a random signal
4343 at one stage in the past included checks for an inferior
4344 function call's call dummy's return breakpoint. The original
4345 comment, that went with the test, read:
4346
4347 ``End of a stack dummy. Some systems (e.g. Sony news) give
4348 another signal besides SIGTRAP, so check here as well as
4349 above.''
4350
4351 If someone ever tries to get call dummies on a
4352 non-executable stack to work (where the target would stop
4353 with something like a SIGSEGV), then those tests might need
4354 to be re-instated. Given, however, that the tests were only
4355 enabled when momentary breakpoints were not being used, I
4356 suspect that it won't be the case.
4357
4358 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4359 be necessary for call dummies on a non-executable stack on
4360 SPARC. */
4361
4362 /* See if the breakpoints module can explain the signal. */
4363 random_signal
4364 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4365 ecs->event_thread->suspend.stop_signal);
4366
4367 /* If not, perhaps stepping/nexting can. */
4368 if (random_signal)
4369 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4370 && currently_stepping (ecs->event_thread));
4371
4372 /* Perhaps the thread hit a single-step breakpoint of _another_
4373 thread. Single-step breakpoints are transparent to the
4374 breakpoints module. */
4375 if (random_signal)
4376 random_signal = !ecs->hit_singlestep_breakpoint;
4377
4378 /* No? Perhaps we got a moribund watchpoint. */
4379 if (random_signal)
4380 random_signal = !stopped_by_watchpoint;
4381
4382 /* For the program's own signals, act according to
4383 the signal handling tables. */
4384
4385 if (random_signal)
4386 {
4387 /* Signal not for debugging purposes. */
4388 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4389 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4390
4391 if (debug_infrun)
4392 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4393 gdb_signal_to_symbol_string (stop_signal));
4394
4395 stopped_by_random_signal = 1;
4396
4397 /* Always stop on signals if we're either just gaining control
4398 of the program, or the user explicitly requested this thread
4399 to remain stopped. */
4400 if (stop_soon != NO_STOP_QUIETLY
4401 || ecs->event_thread->stop_requested
4402 || (!inf->detaching
4403 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4404 {
4405 stop_waiting (ecs);
4406 return;
4407 }
4408
4409 /* Notify observers the signal has "handle print" set. Note we
4410 returned early above if stopping; normal_stop handles the
4411 printing in that case. */
4412 if (signal_print[ecs->event_thread->suspend.stop_signal])
4413 {
4414 /* The signal table tells us to print about this signal. */
4415 target_terminal_ours_for_output ();
4416 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4417 target_terminal_inferior ();
4418 }
4419
4420 /* Clear the signal if it should not be passed. */
4421 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4422 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4423
4424 if (ecs->event_thread->prev_pc == stop_pc
4425 && ecs->event_thread->control.trap_expected
4426 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4427 {
4428 /* We were just starting a new sequence, attempting to
4429 single-step off of a breakpoint and expecting a SIGTRAP.
4430 Instead this signal arrives. This signal will take us out
4431 of the stepping range so GDB needs to remember to, when
4432 the signal handler returns, resume stepping off that
4433 breakpoint. */
4434 /* To simplify things, "continue" is forced to use the same
4435 code paths as single-step - set a breakpoint at the
4436 signal return address and then, once hit, step off that
4437 breakpoint. */
4438 if (debug_infrun)
4439 fprintf_unfiltered (gdb_stdlog,
4440 "infrun: signal arrived while stepping over "
4441 "breakpoint\n");
4442
4443 insert_hp_step_resume_breakpoint_at_frame (frame);
4444 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4445 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4446 ecs->event_thread->control.trap_expected = 0;
4447
4448 /* If we were nexting/stepping some other thread, switch to
4449 it, so that we don't continue it, losing control. */
4450 if (!switch_back_to_stepped_thread (ecs))
4451 keep_going (ecs);
4452 return;
4453 }
4454
4455 if (ecs->event_thread->control.step_range_end != 0
4456 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4457 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4458 && frame_id_eq (get_stack_frame_id (frame),
4459 ecs->event_thread->control.step_stack_frame_id)
4460 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4461 {
4462 /* The inferior is about to take a signal that will take it
4463 out of the single step range. Set a breakpoint at the
4464 current PC (which is presumably where the signal handler
4465 will eventually return) and then allow the inferior to
4466 run free.
4467
4468 Note that this is only needed for a signal delivered
4469 while in the single-step range. Nested signals aren't a
4470 problem as they eventually all return. */
4471 if (debug_infrun)
4472 fprintf_unfiltered (gdb_stdlog,
4473 "infrun: signal may take us out of "
4474 "single-step range\n");
4475
4476 insert_hp_step_resume_breakpoint_at_frame (frame);
4477 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4478 ecs->event_thread->control.trap_expected = 0;
4479 keep_going (ecs);
4480 return;
4481 }
4482
4483 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4484 when either there's a nested signal, or when there's a
4485 pending signal enabled just as the signal handler returns
4486 (leaving the inferior at the step-resume-breakpoint without
4487 actually executing it). Either way continue until the
4488 breakpoint is really hit. */
4489
4490 if (!switch_back_to_stepped_thread (ecs))
4491 {
4492 if (debug_infrun)
4493 fprintf_unfiltered (gdb_stdlog,
4494 "infrun: random signal, keep going\n");
4495
4496 keep_going (ecs);
4497 }
4498 return;
4499 }
4500
4501 process_event_stop_test (ecs);
4502 }
4503
4504 /* Come here when we've got some debug event / signal we can explain
4505 (IOW, not a random signal), and test whether it should cause a
4506 stop, or whether we should resume the inferior (transparently).
4507 E.g., could be a breakpoint whose condition evaluates false; we
4508 could be still stepping within the line; etc. */
4509
4510 static void
4511 process_event_stop_test (struct execution_control_state *ecs)
4512 {
4513 struct symtab_and_line stop_pc_sal;
4514 struct frame_info *frame;
4515 struct gdbarch *gdbarch;
4516 CORE_ADDR jmp_buf_pc;
4517 struct bpstat_what what;
4518
4519 /* Handle cases caused by hitting a breakpoint. */
4520
4521 frame = get_current_frame ();
4522 gdbarch = get_frame_arch (frame);
4523
4524 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4525
4526 if (what.call_dummy)
4527 {
4528 stop_stack_dummy = what.call_dummy;
4529 }
4530
4531 /* If we hit an internal event that triggers symbol changes, the
4532 current frame will be invalidated within bpstat_what (e.g., if we
4533 hit an internal solib event). Re-fetch it. */
4534 frame = get_current_frame ();
4535 gdbarch = get_frame_arch (frame);
4536
4537 switch (what.main_action)
4538 {
4539 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4540 /* If we hit the breakpoint at longjmp while stepping, we
4541 install a momentary breakpoint at the target of the
4542 jmp_buf. */
4543
4544 if (debug_infrun)
4545 fprintf_unfiltered (gdb_stdlog,
4546 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4547
4548 ecs->event_thread->stepping_over_breakpoint = 1;
4549
4550 if (what.is_longjmp)
4551 {
4552 struct value *arg_value;
4553
4554 /* If we set the longjmp breakpoint via a SystemTap probe,
4555 then use it to extract the arguments. The destination PC
4556 is the third argument to the probe. */
4557 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4558 if (arg_value)
4559 {
4560 jmp_buf_pc = value_as_address (arg_value);
4561 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
4562 }
4563 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4564 || !gdbarch_get_longjmp_target (gdbarch,
4565 frame, &jmp_buf_pc))
4566 {
4567 if (debug_infrun)
4568 fprintf_unfiltered (gdb_stdlog,
4569 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4570 "(!gdbarch_get_longjmp_target)\n");
4571 keep_going (ecs);
4572 return;
4573 }
4574
4575 /* Insert a breakpoint at resume address. */
4576 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4577 }
4578 else
4579 check_exception_resume (ecs, frame);
4580 keep_going (ecs);
4581 return;
4582
4583 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4584 {
4585 struct frame_info *init_frame;
4586
4587 /* There are several cases to consider.
4588
4589 1. The initiating frame no longer exists. In this case we
4590 must stop, because the exception or longjmp has gone too
4591 far.
4592
4593 2. The initiating frame exists, and is the same as the
4594 current frame. We stop, because the exception or longjmp
4595 has been caught.
4596
4597 3. The initiating frame exists and is different from the
4598 current frame. This means the exception or longjmp has
4599 been caught beneath the initiating frame, so keep going.
4600
4601 4. The longjmp breakpoint has been placed just to protect
4602 against stale dummy frames, and the user is not interested
4603 in stopping around longjmps. */
4604
4605 if (debug_infrun)
4606 fprintf_unfiltered (gdb_stdlog,
4607 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4608
4609 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4610 != NULL);
4611 delete_exception_resume_breakpoint (ecs->event_thread);
4612
4613 if (what.is_longjmp)
4614 {
4615 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
4616
4617 if (!frame_id_p (ecs->event_thread->initiating_frame))
4618 {
4619 /* Case 4. */
4620 keep_going (ecs);
4621 return;
4622 }
4623 }
4624
4625 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4626
4627 if (init_frame)
4628 {
4629 struct frame_id current_id
4630 = get_frame_id (get_current_frame ());
4631 if (frame_id_eq (current_id,
4632 ecs->event_thread->initiating_frame))
4633 {
4634 /* Case 2. Fall through. */
4635 }
4636 else
4637 {
4638 /* Case 3. */
4639 keep_going (ecs);
4640 return;
4641 }
4642 }
4643
4644 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4645 exists. */
4646 delete_step_resume_breakpoint (ecs->event_thread);
4647
4648 end_stepping_range (ecs);
4649 }
4650 return;
4651
4652 case BPSTAT_WHAT_SINGLE:
4653 if (debug_infrun)
4654 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4655 ecs->event_thread->stepping_over_breakpoint = 1;
4656 /* Still need to check other stuff, at least the case where we
4657 are stepping and step out of the right range. */
4658 break;
4659
4660 case BPSTAT_WHAT_STEP_RESUME:
4661 if (debug_infrun)
4662 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4663
4664 delete_step_resume_breakpoint (ecs->event_thread);
4665 if (ecs->event_thread->control.proceed_to_finish
4666 && execution_direction == EXEC_REVERSE)
4667 {
4668 struct thread_info *tp = ecs->event_thread;
4669
4670 /* We are finishing a function in reverse, and just hit the
4671 step-resume breakpoint at the start address of the
4672 function, and we're almost there -- just need to back up
4673 by one more single-step, which should take us back to the
4674 function call. */
4675 tp->control.step_range_start = tp->control.step_range_end = 1;
4676 keep_going (ecs);
4677 return;
4678 }
4679 fill_in_stop_func (gdbarch, ecs);
4680 if (stop_pc == ecs->stop_func_start
4681 && execution_direction == EXEC_REVERSE)
4682 {
4683 /* We are stepping over a function call in reverse, and just
4684 hit the step-resume breakpoint at the start address of
4685 the function. Go back to single-stepping, which should
4686 take us back to the function call. */
4687 ecs->event_thread->stepping_over_breakpoint = 1;
4688 keep_going (ecs);
4689 return;
4690 }
4691 break;
4692
4693 case BPSTAT_WHAT_STOP_NOISY:
4694 if (debug_infrun)
4695 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4696 stop_print_frame = 1;
4697
4698 /* Assume the thread stopped for a breakpoint. We'll still check
4699 whether a/the breakpoint is there when the thread is next
4700 resumed. */
4701 ecs->event_thread->stepping_over_breakpoint = 1;
4702
4703 stop_waiting (ecs);
4704 return;
4705
4706 case BPSTAT_WHAT_STOP_SILENT:
4707 if (debug_infrun)
4708 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4709 stop_print_frame = 0;
4710
4711 /* Assume the thread stopped for a breakpoint. We'll still check
4712 whether a/the breakpoint is there when the thread is next
4713 resumed. */
4714 ecs->event_thread->stepping_over_breakpoint = 1;
4715 stop_waiting (ecs);
4716 return;
4717
4718 case BPSTAT_WHAT_HP_STEP_RESUME:
4719 if (debug_infrun)
4720 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4721
4722 delete_step_resume_breakpoint (ecs->event_thread);
4723 if (ecs->event_thread->step_after_step_resume_breakpoint)
4724 {
4725 /* Back when the step-resume breakpoint was inserted, we
4726 were trying to single-step off a breakpoint. Go back to
4727 doing that. */
4728 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4729 ecs->event_thread->stepping_over_breakpoint = 1;
4730 keep_going (ecs);
4731 return;
4732 }
4733 break;
4734
4735 case BPSTAT_WHAT_KEEP_CHECKING:
4736 break;
4737 }
4738
4739 /* We come here if we hit a breakpoint but should not stop for it.
4740 Possibly we also were stepping and should stop for that. So fall
4741 through and test for stepping. But, if not stepping, do not
4742 stop. */
4743
4744 /* In all-stop mode, if we're currently stepping but have stopped in
4745 some other thread, we need to switch back to the stepped thread. */
4746 if (switch_back_to_stepped_thread (ecs))
4747 return;
4748
4749 if (ecs->event_thread->control.step_resume_breakpoint)
4750 {
4751 if (debug_infrun)
4752 fprintf_unfiltered (gdb_stdlog,
4753 "infrun: step-resume breakpoint is inserted\n");
4754
4755 /* Having a step-resume breakpoint overrides anything
4756 else having to do with stepping commands until
4757 that breakpoint is reached. */
4758 keep_going (ecs);
4759 return;
4760 }
4761
4762 if (ecs->event_thread->control.step_range_end == 0)
4763 {
4764 if (debug_infrun)
4765 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4766 /* Likewise if we aren't even stepping. */
4767 keep_going (ecs);
4768 return;
4769 }
4770
4771 /* Re-fetch current thread's frame in case the code above caused
4772 the frame cache to be re-initialized, making our FRAME variable
4773 a dangling pointer. */
4774 frame = get_current_frame ();
4775 gdbarch = get_frame_arch (frame);
4776 fill_in_stop_func (gdbarch, ecs);
4777
4778 /* If stepping through a line, keep going if still within it.
4779
4780 Note that step_range_end is the address of the first instruction
4781 beyond the step range, and NOT the address of the last instruction
4782 within it!
4783
4784 Note also that during reverse execution, we may be stepping
4785 through a function epilogue and therefore must detect when
4786 the current-frame changes in the middle of a line. */
4787
4788 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4789 && (execution_direction != EXEC_REVERSE
4790 || frame_id_eq (get_frame_id (frame),
4791 ecs->event_thread->control.step_frame_id)))
4792 {
4793 if (debug_infrun)
4794 fprintf_unfiltered
4795 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4796 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4797 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4798
4799 /* Tentatively re-enable range stepping; `resume' disables it if
4800 necessary (e.g., if we're stepping over a breakpoint or we
4801 have software watchpoints). */
4802 ecs->event_thread->control.may_range_step = 1;
4803
4804 /* When stepping backward, stop at beginning of line range
4805 (unless it's the function entry point, in which case
4806 keep going back to the call point). */
4807 if (stop_pc == ecs->event_thread->control.step_range_start
4808 && stop_pc != ecs->stop_func_start
4809 && execution_direction == EXEC_REVERSE)
4810 end_stepping_range (ecs);
4811 else
4812 keep_going (ecs);
4813
4814 return;
4815 }
4816
4817 /* We stepped out of the stepping range. */
4818
4819 /* If we are stepping at the source level and entered the runtime
4820 loader dynamic symbol resolution code...
4821
4822 EXEC_FORWARD: we keep on single stepping until we exit the run
4823 time loader code and reach the callee's address.
4824
4825 EXEC_REVERSE: we've already executed the callee (backward), and
4826 the runtime loader code is handled just like any other
4827 undebuggable function call. Now we need only keep stepping
4828 backward through the trampoline code, and that's handled further
4829 down, so there is nothing for us to do here. */
4830
4831 if (execution_direction != EXEC_REVERSE
4832 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4833 && in_solib_dynsym_resolve_code (stop_pc))
4834 {
4835 CORE_ADDR pc_after_resolver =
4836 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4837
4838 if (debug_infrun)
4839 fprintf_unfiltered (gdb_stdlog,
4840 "infrun: stepped into dynsym resolve code\n");
4841
4842 if (pc_after_resolver)
4843 {
4844 /* Set up a step-resume breakpoint at the address
4845 indicated by SKIP_SOLIB_RESOLVER. */
4846 struct symtab_and_line sr_sal;
4847
4848 init_sal (&sr_sal);
4849 sr_sal.pc = pc_after_resolver;
4850 sr_sal.pspace = get_frame_program_space (frame);
4851
4852 insert_step_resume_breakpoint_at_sal (gdbarch,
4853 sr_sal, null_frame_id);
4854 }
4855
4856 keep_going (ecs);
4857 return;
4858 }
4859
4860 if (ecs->event_thread->control.step_range_end != 1
4861 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4862 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4863 && get_frame_type (frame) == SIGTRAMP_FRAME)
4864 {
4865 if (debug_infrun)
4866 fprintf_unfiltered (gdb_stdlog,
4867 "infrun: stepped into signal trampoline\n");
4868 /* The inferior, while doing a "step" or "next", has ended up in
4869 a signal trampoline (either by a signal being delivered or by
4870 the signal handler returning). Just single-step until the
4871 inferior leaves the trampoline (either by calling the handler
4872 or returning). */
4873 keep_going (ecs);
4874 return;
4875 }
4876
4877 /* If we're in the return path from a shared library trampoline,
4878 we want to proceed through the trampoline when stepping. */
4879 /* macro/2012-04-25: This needs to come before the subroutine
4880 call check below as on some targets return trampolines look
4881 like subroutine calls (MIPS16 return thunks). */
4882 if (gdbarch_in_solib_return_trampoline (gdbarch,
4883 stop_pc, ecs->stop_func_name)
4884 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4885 {
4886 /* Determine where this trampoline returns. */
4887 CORE_ADDR real_stop_pc;
4888
4889 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4890
4891 if (debug_infrun)
4892 fprintf_unfiltered (gdb_stdlog,
4893 "infrun: stepped into solib return tramp\n");
4894
4895 /* Only proceed through if we know where it's going. */
4896 if (real_stop_pc)
4897 {
4898 /* And put the step-breakpoint there and go until there. */
4899 struct symtab_and_line sr_sal;
4900
4901 init_sal (&sr_sal); /* initialize to zeroes */
4902 sr_sal.pc = real_stop_pc;
4903 sr_sal.section = find_pc_overlay (sr_sal.pc);
4904 sr_sal.pspace = get_frame_program_space (frame);
4905
4906 /* Do not specify what the fp should be when we stop since
4907 on some machines the prologue is where the new fp value
4908 is established. */
4909 insert_step_resume_breakpoint_at_sal (gdbarch,
4910 sr_sal, null_frame_id);
4911
4912 /* Restart without fiddling with the step ranges or
4913 other state. */
4914 keep_going (ecs);
4915 return;
4916 }
4917 }
4918
4919 /* Check for subroutine calls. The check for the current frame
4920 equalling the step ID is not necessary - the check of the
4921 previous frame's ID is sufficient - but it is a common case and
4922 cheaper than checking the previous frame's ID.
4923
4924 NOTE: frame_id_eq will never report two invalid frame IDs as
4925 being equal, so to get into this block, both the current and
4926 previous frame must have valid frame IDs. */
4927 /* The outer_frame_id check is a heuristic to detect stepping
4928 through startup code. If we step over an instruction which
4929 sets the stack pointer from an invalid value to a valid value,
4930 we may detect that as a subroutine call from the mythical
4931 "outermost" function. This could be fixed by marking
4932 outermost frames as !stack_p,code_p,special_p. Then the
4933 initial outermost frame, before sp was valid, would
4934 have code_addr == &_start. See the comment in frame_id_eq
4935 for more. */
4936 if (!frame_id_eq (get_stack_frame_id (frame),
4937 ecs->event_thread->control.step_stack_frame_id)
4938 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4939 ecs->event_thread->control.step_stack_frame_id)
4940 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4941 outer_frame_id)
4942 || step_start_function != find_pc_function (stop_pc))))
4943 {
4944 CORE_ADDR real_stop_pc;
4945
4946 if (debug_infrun)
4947 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4948
4949 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4950 || ((ecs->event_thread->control.step_range_end == 1)
4951 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4952 ecs->stop_func_start)))
4953 {
4954 /* I presume that step_over_calls is only 0 when we're
4955 supposed to be stepping at the assembly language level
4956 ("stepi"). Just stop. */
4957 /* Also, maybe we just did a "nexti" inside a prolog, so we
4958 thought it was a subroutine call but it was not. Stop as
4959 well. FENN */
4960 /* And this works the same backward as frontward. MVS */
4961 end_stepping_range (ecs);
4962 return;
4963 }
4964
4965 /* Reverse stepping through solib trampolines. */
4966
4967 if (execution_direction == EXEC_REVERSE
4968 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4969 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4970 || (ecs->stop_func_start == 0
4971 && in_solib_dynsym_resolve_code (stop_pc))))
4972 {
4973 /* Any solib trampoline code can be handled in reverse
4974 by simply continuing to single-step. We have already
4975 executed the solib function (backwards), and a few
4976 steps will take us back through the trampoline to the
4977 caller. */
4978 keep_going (ecs);
4979 return;
4980 }
4981
4982 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4983 {
4984 /* We're doing a "next".
4985
4986 Normal (forward) execution: set a breakpoint at the
4987 callee's return address (the address at which the caller
4988 will resume).
4989
4990 	 Reverse (backward) execution: set the step-resume
4991 breakpoint at the start of the function that we just
4992 stepped into (backwards), and continue to there. When we
4993 get there, we'll need to single-step back to the caller. */
4994
4995 if (execution_direction == EXEC_REVERSE)
4996 {
4997 /* If we're already at the start of the function, we've either
4998 just stepped backward into a single instruction function,
4999 or stepped back out of a signal handler to the first instruction
5000 of the function. Just keep going, which will single-step back
5001 to the caller. */
5002 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
5003 {
5004 struct symtab_and_line sr_sal;
5005
5006 /* Normal function call return (static or dynamic). */
5007 init_sal (&sr_sal);
5008 sr_sal.pc = ecs->stop_func_start;
5009 sr_sal.pspace = get_frame_program_space (frame);
5010 insert_step_resume_breakpoint_at_sal (gdbarch,
5011 sr_sal, null_frame_id);
5012 }
5013 }
5014 else
5015 insert_step_resume_breakpoint_at_caller (frame);
5016
5017 keep_going (ecs);
5018 return;
5019 }
5020
5021 /* If we are in a function call trampoline (a stub between the
5022 calling routine and the real function), locate the real
5023 function. That's what tells us (a) whether we want to step
5024 into it at all, and (b) what prologue we want to run to the
5025 end of, if we do step into it. */
5026 real_stop_pc = skip_language_trampoline (frame, stop_pc);
5027 if (real_stop_pc == 0)
5028 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5029 if (real_stop_pc != 0)
5030 ecs->stop_func_start = real_stop_pc;
5031
5032 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
5033 {
5034 struct symtab_and_line sr_sal;
5035
5036 init_sal (&sr_sal);
5037 sr_sal.pc = ecs->stop_func_start;
5038 sr_sal.pspace = get_frame_program_space (frame);
5039
5040 insert_step_resume_breakpoint_at_sal (gdbarch,
5041 sr_sal, null_frame_id);
5042 keep_going (ecs);
5043 return;
5044 }
5045
5046 /* If we have line number information for the function we are
5047 thinking of stepping into and the function isn't on the skip
5048 list, step into it.
5049
5050 If there are several symtabs at that PC (e.g. with include
5051       files), we just want to know whether *any* of them have line
5052 numbers. find_pc_line handles this. */
5053 {
5054 struct symtab_and_line tmp_sal;
5055
5056 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
5057 if (tmp_sal.line != 0
5058 && !function_name_is_marked_for_skip (ecs->stop_func_name,
5059 &tmp_sal))
5060 {
5061 if (execution_direction == EXEC_REVERSE)
5062 handle_step_into_function_backward (gdbarch, ecs);
5063 else
5064 handle_step_into_function (gdbarch, ecs);
5065 return;
5066 }
5067 }
5068
5069 /* If we have no line number and the step-stop-if-no-debug is
5070 set, we stop the step so that the user has a chance to switch
5071      to assembly mode.  */
5072 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5073 && step_stop_if_no_debug)
5074 {
5075 end_stepping_range (ecs);
5076 return;
5077 }
5078
5079 if (execution_direction == EXEC_REVERSE)
5080 {
5081 /* If we're already at the start of the function, we've either just
5082 stepped backward into a single instruction function without line
5083 number info, or stepped back out of a signal handler to the first
5084 instruction of the function without line number info. Just keep
5085 going, which will single-step back to the caller. */
5086 if (ecs->stop_func_start != stop_pc)
5087 {
5088 /* Set a breakpoint at callee's start address.
5089 From there we can step once and be back in the caller. */
5090 struct symtab_and_line sr_sal;
5091
5092 init_sal (&sr_sal);
5093 sr_sal.pc = ecs->stop_func_start;
5094 sr_sal.pspace = get_frame_program_space (frame);
5095 insert_step_resume_breakpoint_at_sal (gdbarch,
5096 sr_sal, null_frame_id);
5097 }
5098 }
5099 else
5100 /* Set a breakpoint at callee's return address (the address
5101 at which the caller will resume). */
5102 insert_step_resume_breakpoint_at_caller (frame);
5103
5104 keep_going (ecs);
5105 return;
5106 }
5107
5108 /* Reverse stepping through solib trampolines. */
5109
5110 if (execution_direction == EXEC_REVERSE
5111 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5112 {
5113 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5114 || (ecs->stop_func_start == 0
5115 && in_solib_dynsym_resolve_code (stop_pc)))
5116 {
5117 /* Any solib trampoline code can be handled in reverse
5118 by simply continuing to single-step. We have already
5119 executed the solib function (backwards), and a few
5120 steps will take us back through the trampoline to the
5121 caller. */
5122 keep_going (ecs);
5123 return;
5124 }
5125 else if (in_solib_dynsym_resolve_code (stop_pc))
5126 {
5127 /* Stepped backward into the solib dynsym resolver.
5128 Set a breakpoint at its start and continue, then
5129 one more step will take us out. */
5130 struct symtab_and_line sr_sal;
5131
5132 init_sal (&sr_sal);
5133 sr_sal.pc = ecs->stop_func_start;
5134 sr_sal.pspace = get_frame_program_space (frame);
5135 insert_step_resume_breakpoint_at_sal (gdbarch,
5136 sr_sal, null_frame_id);
5137 keep_going (ecs);
5138 return;
5139 }
5140 }
5141
5142 stop_pc_sal = find_pc_line (stop_pc, 0);
5143
5144 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5145      the trampoline processing logic; however, there are some trampolines
5146 that have no names, so we should do trampoline handling first. */
5147 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5148 && ecs->stop_func_name == NULL
5149 && stop_pc_sal.line == 0)
5150 {
5151 if (debug_infrun)
5152 fprintf_unfiltered (gdb_stdlog,
5153 "infrun: stepped into undebuggable function\n");
5154
5155 /* The inferior just stepped into, or returned to, an
5156 undebuggable function (where there is no debugging information
5157 and no line number corresponding to the address where the
5158 inferior stopped). Since we want to skip this kind of code,
5159 we keep going until the inferior returns from this
5160 function - unless the user has asked us not to (via
5161 set step-mode) or we no longer know how to get back
5162 to the call site. */
5163 if (step_stop_if_no_debug
5164 || !frame_id_p (frame_unwind_caller_id (frame)))
5165 {
5166 /* If we have no line number and the step-stop-if-no-debug
5167 is set, we stop the step so that the user has a chance to
5168 	 switch to assembly mode.  */
5169 end_stepping_range (ecs);
5170 return;
5171 }
5172 else
5173 {
5174 /* Set a breakpoint at callee's return address (the address
5175 at which the caller will resume). */
5176 insert_step_resume_breakpoint_at_caller (frame);
5177 keep_going (ecs);
5178 return;
5179 }
5180 }
5181
5182 if (ecs->event_thread->control.step_range_end == 1)
5183 {
5184 /* It is stepi or nexti. We always want to stop stepping after
5185 one instruction. */
5186 if (debug_infrun)
5187 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5188 end_stepping_range (ecs);
5189 return;
5190 }
5191
5192 if (stop_pc_sal.line == 0)
5193 {
5194       /* We have no line number information.  That means we should stop
5195 stepping (does this always happen right after one instruction,
5196 when we do "s" in a function with no line numbers,
5197 or can this happen as a result of a return or longjmp?). */
5198 if (debug_infrun)
5199 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5200 end_stepping_range (ecs);
5201 return;
5202 }
5203
5204 /* Look for "calls" to inlined functions, part one. If the inline
5205 frame machinery detected some skipped call sites, we have entered
5206 a new inline function. */
5207
5208 if (frame_id_eq (get_frame_id (get_current_frame ()),
5209 ecs->event_thread->control.step_frame_id)
5210 && inline_skipped_frames (ecs->ptid))
5211 {
5212 struct symtab_and_line call_sal;
5213
5214 if (debug_infrun)
5215 fprintf_unfiltered (gdb_stdlog,
5216 "infrun: stepped into inlined function\n");
5217
5218 find_frame_sal (get_current_frame (), &call_sal);
5219
5220 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5221 {
5222 /* For "step", we're going to stop. But if the call site
5223 for this inlined function is on the same source line as
5224 we were previously stepping, go down into the function
5225 first. Otherwise stop at the call site. */
5226
5227 if (call_sal.line == ecs->event_thread->current_line
5228 && call_sal.symtab == ecs->event_thread->current_symtab)
5229 step_into_inline_frame (ecs->ptid);
5230
5231 end_stepping_range (ecs);
5232 return;
5233 }
5234 else
5235 {
5236 /* For "next", we should stop at the call site if it is on a
5237 different source line. Otherwise continue through the
5238 inlined function. */
5239 if (call_sal.line == ecs->event_thread->current_line
5240 && call_sal.symtab == ecs->event_thread->current_symtab)
5241 keep_going (ecs);
5242 else
5243 end_stepping_range (ecs);
5244 return;
5245 }
5246 }
5247
5248 /* Look for "calls" to inlined functions, part two. If we are still
5249 in the same real function we were stepping through, but we have
5250 to go further up to find the exact frame ID, we are stepping
5251 through a more inlined call beyond its call site. */
5252
5253 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5254 && !frame_id_eq (get_frame_id (get_current_frame ()),
5255 ecs->event_thread->control.step_frame_id)
5256 && stepped_in_from (get_current_frame (),
5257 ecs->event_thread->control.step_frame_id))
5258 {
5259 if (debug_infrun)
5260 fprintf_unfiltered (gdb_stdlog,
5261 "infrun: stepping through inlined function\n");
5262
5263 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5264 keep_going (ecs);
5265 else
5266 end_stepping_range (ecs);
5267 return;
5268 }
5269
5270 if ((stop_pc == stop_pc_sal.pc)
5271 && (ecs->event_thread->current_line != stop_pc_sal.line
5272 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5273 {
5274 /* We are at the start of a different line. So stop. Note that
5275 we don't stop if we step into the middle of a different line.
5276 That is said to make things like for (;;) statements work
5277 better. */
5278 if (debug_infrun)
5279 fprintf_unfiltered (gdb_stdlog,
5280 "infrun: stepped to a different line\n");
5281 end_stepping_range (ecs);
5282 return;
5283 }
5284
5285 /* We aren't done stepping.
5286
5287 Optimize by setting the stepping range to the line.
5288 (We might not be in the original line, but if we entered a
5289 new line in mid-statement, we continue stepping. This makes
5290 things like for(;;) statements work better.) */
5291
5292 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5293 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5294 ecs->event_thread->control.may_range_step = 1;
5295 set_step_info (frame, stop_pc_sal);
5296
5297 if (debug_infrun)
5298 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5299 keep_going (ecs);
5300 }
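
/* Illustrative note (a sketch restating the tail of the function
   above): when the thread stops in the middle of a source line, the
   step range is simply re-anchored to the line containing the new PC,
   roughly

     step_range_start = stop_pc_sal.pc;
     step_range_end = stop_pc_sal.end;

   and may_range_step is set again so the next resume can use range
   stepping where the target supports it.  */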
5301
5302 /* In all-stop mode, if we're currently stepping but have stopped in
5303 some other thread, we may need to switch back to the stepped
5304    thread.  Returns true if we set the inferior running, false if we left
5305 it stopped (and the event needs further processing). */
5306
5307 static int
5308 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5309 {
5310 if (!non_stop)
5311 {
5312 struct thread_info *tp;
5313 struct thread_info *stepping_thread;
5314 struct thread_info *step_over;
5315
5316 /* If any thread is blocked on some internal breakpoint, and we
5317 simply need to step over that breakpoint to get it going
5318 again, do that first. */
5319
5320 /* However, if we see an event for the stepping thread, then we
5321 know all other threads have been moved past their breakpoints
5322 already. Let the caller check whether the step is finished,
5323 etc., before deciding to move it past a breakpoint. */
5324 if (ecs->event_thread->control.step_range_end != 0)
5325 return 0;
5326
5327 /* Check if the current thread is blocked on an incomplete
5328 step-over, interrupted by a random signal. */
5329 if (ecs->event_thread->control.trap_expected
5330 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5331 {
5332 if (debug_infrun)
5333 {
5334 fprintf_unfiltered (gdb_stdlog,
5335 "infrun: need to finish step-over of [%s]\n",
5336 target_pid_to_str (ecs->event_thread->ptid));
5337 }
5338 keep_going (ecs);
5339 return 1;
5340 }
5341
5342 /* Check if the current thread is blocked by a single-step
5343 breakpoint of another thread. */
5344 if (ecs->hit_singlestep_breakpoint)
5345 {
5346 if (debug_infrun)
5347 {
5348 fprintf_unfiltered (gdb_stdlog,
5349 "infrun: need to step [%s] over single-step "
5350 "breakpoint\n",
5351 target_pid_to_str (ecs->ptid));
5352 }
5353 keep_going (ecs);
5354 return 1;
5355 }
5356
5357 /* Otherwise, we no longer expect a trap in the current thread.
5358 Clear the trap_expected flag before switching back -- this is
5359 what keep_going does as well, if we call it. */
5360 ecs->event_thread->control.trap_expected = 0;
5361
5362 /* Likewise, clear the signal if it should not be passed. */
5363 if (!signal_program[ecs->event_thread->suspend.stop_signal])
5364 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5365
5366 /* If scheduler locking applies even if not stepping, there's no
5367 need to walk over threads. Above we've checked whether the
5368 	 current thread is stepping.  If some thread other than the
5369 	 event thread is stepping, then scheduler locking cannot be
5370 	 in effect.  */
5371 if (schedlock_applies (0))
5372 return 0;
5373
5374       /* Look for the stepping/nexting thread, and check if any
5375 thread other than the stepping thread needs to start a
5376 step-over. Do all step-overs before actually proceeding with
5377 step/next/etc. */
5378 stepping_thread = NULL;
5379 step_over = NULL;
5380 ALL_NON_EXITED_THREADS (tp)
5381 {
5382 /* Ignore threads of processes we're not resuming. */
5383 if (!sched_multi
5384 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5385 continue;
5386
5387 /* When stepping over a breakpoint, we lock all threads
5388 except the one that needs to move past the breakpoint.
5389 If a non-event thread has this set, the "incomplete
5390 step-over" check above should have caught it earlier. */
5391 gdb_assert (!tp->control.trap_expected);
5392
5393 /* Did we find the stepping thread? */
5394 if (tp->control.step_range_end)
5395 {
5396 	    /* Yep.  There should only be one, though.  */
5397 gdb_assert (stepping_thread == NULL);
5398
5399 /* The event thread is handled at the top, before we
5400 enter this loop. */
5401 gdb_assert (tp != ecs->event_thread);
5402
5403 /* If some thread other than the event thread is
5404 stepping, then scheduler locking can't be in effect,
5405 otherwise we wouldn't have resumed the current event
5406 thread in the first place. */
5407 gdb_assert (!schedlock_applies (1));
5408
5409 stepping_thread = tp;
5410 }
5411 else if (thread_still_needs_step_over (tp))
5412 {
5413 step_over = tp;
5414
5415 /* At the top we've returned early if the event thread
5416 	       is stepping.  If some thread other than the event
5417 thread is stepping, then scheduler locking can't be
5418 in effect, and we can resume this thread. No need to
5419 keep looking for the stepping thread then. */
5420 break;
5421 }
5422 }
5423
5424 if (step_over != NULL)
5425 {
5426 tp = step_over;
5427 if (debug_infrun)
5428 {
5429 fprintf_unfiltered (gdb_stdlog,
5430 "infrun: need to step-over [%s]\n",
5431 target_pid_to_str (tp->ptid));
5432 }
5433
5434 /* Only the stepping thread should have this set. */
5435 gdb_assert (tp->control.step_range_end == 0);
5436
5437 ecs->ptid = tp->ptid;
5438 ecs->event_thread = tp;
5439 switch_to_thread (ecs->ptid);
5440 keep_going (ecs);
5441 return 1;
5442 }
5443
5444 if (stepping_thread != NULL)
5445 {
5446 struct frame_info *frame;
5447 struct gdbarch *gdbarch;
5448
5449 tp = stepping_thread;
5450
5451 /* If the stepping thread exited, then don't try to switch
5452 back and resume it, which could fail in several different
5453 ways depending on the target. Instead, just keep going.
5454
5455 We can find a stepping dead thread in the thread list in
5456 two cases:
5457
5458 - The target supports thread exit events, and when the
5459 target tries to delete the thread from the thread list,
5460 inferior_ptid pointed at the exiting thread. In such
5461 case, calling delete_thread does not really remove the
5462 thread from the list; instead, the thread is left listed,
5463 with 'exited' state.
5464
5465 - The target's debug interface does not support thread
5466 exit events, and so we have no idea whatsoever if the
5467 previously stepping thread is still alive. For that
5468 reason, we need to synchronously query the target
5469 now. */
5470 if (is_exited (tp->ptid)
5471 || !target_thread_alive (tp->ptid))
5472 {
5473 if (debug_infrun)
5474 fprintf_unfiltered (gdb_stdlog,
5475 "infrun: not switching back to "
5476 "stepped thread, it has vanished\n");
5477
5478 delete_thread (tp->ptid);
5479 keep_going (ecs);
5480 return 1;
5481 }
5482
5483 if (debug_infrun)
5484 fprintf_unfiltered (gdb_stdlog,
5485 "infrun: switching back to stepped thread\n");
5486
5487 ecs->event_thread = tp;
5488 ecs->ptid = tp->ptid;
5489 context_switch (ecs->ptid);
5490
5491 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5492 frame = get_current_frame ();
5493 gdbarch = get_frame_arch (frame);
5494
5495 /* If the PC of the thread we were trying to single-step has
5496 changed, then that thread has trapped or been signaled,
5497 but the event has not been reported to GDB yet. Re-poll
5498 the target looking for this particular thread's event
5499 (i.e. temporarily enable schedlock) by:
5500
5501 - setting a break at the current PC
5502 - resuming that particular thread, only (by setting
5503 trap expected)
5504
5505 This prevents us continuously moving the single-step
5506 breakpoint forward, one instruction at a time,
5507 overstepping. */
5508
5509 if (gdbarch_software_single_step_p (gdbarch)
5510 && stop_pc != tp->prev_pc)
5511 {
5512 if (debug_infrun)
5513 fprintf_unfiltered (gdb_stdlog,
5514 "infrun: expected thread advanced also\n");
5515
5516 /* Clear the info of the previous step-over, as it's no
5517 longer valid. It's what keep_going would do too, if
5518 we called it. Must do this before trying to insert
5519 the sss breakpoint, otherwise if we were previously
5520 trying to step over this exact address in another
5521 thread, the breakpoint ends up not installed. */
5522 clear_step_over_info ();
5523
5524 insert_single_step_breakpoint (get_frame_arch (frame),
5525 get_frame_address_space (frame),
5526 stop_pc);
5527 ecs->event_thread->control.trap_expected = 1;
5528
5529 resume (0, GDB_SIGNAL_0);
5530 prepare_to_wait (ecs);
5531 }
5532 else
5533 {
5534 if (debug_infrun)
5535 fprintf_unfiltered (gdb_stdlog,
5536 "infrun: expected thread still "
5537 "hasn't advanced\n");
5538 keep_going (ecs);
5539 }
5540
5541 return 1;
5542 }
5543 }
5544 return 0;
5545 }
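
/* Illustrative sketch of the expected calling convention, based on
   the contract documented above (not a quote of the actual caller):

     if (switch_back_to_stepped_thread (ecs))
       return;

   i.e., if this routine set the inferior running again, the event
   needs no further processing; otherwise the caller keeps analyzing
   the stop.  */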
5546
5547 /* Is thread TP in the middle of single-stepping? */
5548
5549 static int
5550 currently_stepping (struct thread_info *tp)
5551 {
5552 return ((tp->control.step_range_end
5553 && tp->control.step_resume_breakpoint == NULL)
5554 || tp->control.trap_expected
5555 || bpstat_should_step ());
5556 }
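
/* For example, while a "step"/"next" range is active
   tp->control.step_range_end is non-zero (it is 1 for stepi/nexti),
   so the first clause of the predicate above holds unless a
   step-resume breakpoint has temporarily taken over the stepping.  */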
5557
5558 /* Inferior has stepped into a subroutine call with source code that
5559    we should not step over.  Step to the first line of code in
5560 it. */
5561
5562 static void
5563 handle_step_into_function (struct gdbarch *gdbarch,
5564 struct execution_control_state *ecs)
5565 {
5566 struct symtab *s;
5567 struct symtab_and_line stop_func_sal, sr_sal;
5568
5569 fill_in_stop_func (gdbarch, ecs);
5570
5571 s = find_pc_symtab (stop_pc);
5572 if (s && s->language != language_asm)
5573 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5574 ecs->stop_func_start);
5575
5576 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5577 /* Use the step_resume_break to step until the end of the prologue,
5578 even if that involves jumps (as it seems to on the vax under
5579 4.2). */
5580 /* If the prologue ends in the middle of a source line, continue to
5581 the end of that source line (if it is still within the function).
5582 Otherwise, just go to end of prologue. */
5583 if (stop_func_sal.end
5584 && stop_func_sal.pc != ecs->stop_func_start
5585 && stop_func_sal.end < ecs->stop_func_end)
5586 ecs->stop_func_start = stop_func_sal.end;
5587
5588 /* Architectures which require breakpoint adjustment might not be able
5589 to place a breakpoint at the computed address. If so, the test
5590 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5591 ecs->stop_func_start to an address at which a breakpoint may be
5592 legitimately placed.
5593
5594 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5595 made, GDB will enter an infinite loop when stepping through
5596 optimized code consisting of VLIW instructions which contain
5597 subinstructions corresponding to different source lines. On
5598 FR-V, it's not permitted to place a breakpoint on any but the
5599 first subinstruction of a VLIW instruction. When a breakpoint is
5600 set, GDB will adjust the breakpoint address to the beginning of
5601 the VLIW instruction. Thus, we need to make the corresponding
5602 adjustment here when computing the stop address. */
5603
5604 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5605 {
5606 ecs->stop_func_start
5607 = gdbarch_adjust_breakpoint_address (gdbarch,
5608 ecs->stop_func_start);
5609 }
5610
5611 if (ecs->stop_func_start == stop_pc)
5612 {
5613 /* We are already there: stop now. */
5614 end_stepping_range (ecs);
5615 return;
5616 }
5617 else
5618 {
5619 /* Put the step-breakpoint there and go until there. */
5620 init_sal (&sr_sal); /* initialize to zeroes */
5621 sr_sal.pc = ecs->stop_func_start;
5622 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5623 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5624
5625 /* Do not specify what the fp should be when we stop since on
5626 some machines the prologue is where the new fp value is
5627 established. */
5628 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5629
5630 /* And make sure stepping stops right away then. */
5631 ecs->event_thread->control.step_range_end
5632 = ecs->event_thread->control.step_range_start;
5633 }
5634 keep_going (ecs);
5635 }
5636
5637 /* Inferior has stepped backward into a subroutine call with source
5638    code that we should not step over.  Step to the beginning of the
5639 last line of code in it. */
5640
5641 static void
5642 handle_step_into_function_backward (struct gdbarch *gdbarch,
5643 struct execution_control_state *ecs)
5644 {
5645 struct symtab *s;
5646 struct symtab_and_line stop_func_sal;
5647
5648 fill_in_stop_func (gdbarch, ecs);
5649
5650 s = find_pc_symtab (stop_pc);
5651 if (s && s->language != language_asm)
5652 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5653 ecs->stop_func_start);
5654
5655 stop_func_sal = find_pc_line (stop_pc, 0);
5656
5657 /* OK, we're just going to keep stepping here. */
5658 if (stop_func_sal.pc == stop_pc)
5659 {
5660 /* We're there already. Just stop stepping now. */
5661 end_stepping_range (ecs);
5662 }
5663 else
5664 {
5665 /* Else just reset the step range and keep going.
5666 	 No step-resume breakpoint; they don't work for
5667 epilogues, which can have multiple entry paths. */
5668 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5669 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5670 keep_going (ecs);
5671 }
5672 return;
5673 }
5674
5675 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5676    This is used both to skip over functions and to skip over other code.  */
5677
5678 static void
5679 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5680 struct symtab_and_line sr_sal,
5681 struct frame_id sr_id,
5682 enum bptype sr_type)
5683 {
5684 /* There should never be more than one step-resume or longjmp-resume
5685 breakpoint per thread, so we should never be setting a new
5686 step_resume_breakpoint when one is already active. */
5687 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5688 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5689
5690 if (debug_infrun)
5691 fprintf_unfiltered (gdb_stdlog,
5692 "infrun: inserting step-resume breakpoint at %s\n",
5693 paddress (gdbarch, sr_sal.pc));
5694
5695 inferior_thread ()->control.step_resume_breakpoint
5696 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5697 }
5698
5699 void
5700 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5701 struct symtab_and_line sr_sal,
5702 struct frame_id sr_id)
5703 {
5704 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5705 sr_sal, sr_id,
5706 bp_step_resume);
5707 }
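
/* For illustration, the callers earlier in this file typically follow
   the same pattern (<resume address> standing in for whatever PC they
   computed):

     struct symtab_and_line sr_sal;

     init_sal (&sr_sal);
     sr_sal.pc = <resume address>;
     sr_sal.pspace = get_frame_program_space (frame);
     insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
					   null_frame_id);

   most of them passing null_frame_id rather than a specific frame.  */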
5708
5709 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5710 This is used to skip a potential signal handler.
5711
5712 This is called with the interrupted function's frame. The signal
5713 handler, when it returns, will resume the interrupted function at
5714 RETURN_FRAME.pc. */
5715
5716 static void
5717 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5718 {
5719 struct symtab_and_line sr_sal;
5720 struct gdbarch *gdbarch;
5721
5722 gdb_assert (return_frame != NULL);
5723 init_sal (&sr_sal); /* initialize to zeros */
5724
5725 gdbarch = get_frame_arch (return_frame);
5726 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5727 sr_sal.section = find_pc_overlay (sr_sal.pc);
5728 sr_sal.pspace = get_frame_program_space (return_frame);
5729
5730 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5731 get_stack_frame_id (return_frame),
5732 bp_hp_step_resume);
5733 }
5734
5735 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5736 is used to skip a function after stepping into it (for "next" or if
5737 the called function has no debugging information).
5738
5739 The current function has almost always been reached by single
5740 stepping a call or return instruction. NEXT_FRAME belongs to the
5741 current function, and the breakpoint will be set at the caller's
5742 resume address.
5743
5744 This is a separate function rather than reusing
5745 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5746 get_prev_frame, which may stop prematurely (see the implementation
5747 of frame_unwind_caller_id for an example). */
5748
5749 static void
5750 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5751 {
5752 struct symtab_and_line sr_sal;
5753 struct gdbarch *gdbarch;
5754
5755 /* We shouldn't have gotten here if we don't know where the call site
5756 is. */
5757 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5758
5759 init_sal (&sr_sal); /* initialize to zeros */
5760
5761 gdbarch = frame_unwind_caller_arch (next_frame);
5762 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5763 frame_unwind_caller_pc (next_frame));
5764 sr_sal.section = find_pc_overlay (sr_sal.pc);
5765 sr_sal.pspace = frame_unwind_program_space (next_frame);
5766
5767 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5768 frame_unwind_caller_id (next_frame));
5769 }
5770
5771 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5772 new breakpoint at the target of a jmp_buf. The handling of
5773 longjmp-resume uses the same mechanisms used for handling
5774 "step-resume" breakpoints. */
5775
5776 static void
5777 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5778 {
5779 /* There should never be more than one longjmp-resume breakpoint per
5780 thread, so we should never be setting a new
5781 longjmp_resume_breakpoint when one is already active. */
5782 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5783
5784 if (debug_infrun)
5785 fprintf_unfiltered (gdb_stdlog,
5786 "infrun: inserting longjmp-resume breakpoint at %s\n",
5787 paddress (gdbarch, pc));
5788
5789 inferior_thread ()->control.exception_resume_breakpoint =
5790 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5791 }
5792
5793 /* Insert an exception resume breakpoint. TP is the thread throwing
5794 the exception. The block B is the block of the unwinder debug hook
5795 function. FRAME is the frame corresponding to the call to this
5796 function. SYM is the symbol of the function argument holding the
5797 target PC of the exception. */
5798
5799 static void
5800 insert_exception_resume_breakpoint (struct thread_info *tp,
5801 const struct block *b,
5802 struct frame_info *frame,
5803 struct symbol *sym)
5804 {
5805 volatile struct gdb_exception e;
5806
5807 /* We want to ignore errors here. */
5808 TRY_CATCH (e, RETURN_MASK_ERROR)
5809 {
5810 struct symbol *vsym;
5811 struct value *value;
5812 CORE_ADDR handler;
5813 struct breakpoint *bp;
5814
5815 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5816 value = read_var_value (vsym, frame);
5817 /* If the value was optimized out, revert to the old behavior. */
5818 if (! value_optimized_out (value))
5819 {
5820 handler = value_as_address (value);
5821
5822 if (debug_infrun)
5823 fprintf_unfiltered (gdb_stdlog,
5824 "infrun: exception resume at %lx\n",
5825 (unsigned long) handler);
5826
5827 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5828 handler, bp_exception_resume);
5829
5830 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5831 frame = NULL;
5832
5833 bp->thread = tp->num;
5834 inferior_thread ()->control.exception_resume_breakpoint = bp;
5835 }
5836 }
5837 }
5838
5839 /* A helper for check_exception_resume that sets an
5840 exception-breakpoint based on a SystemTap probe. */
5841
5842 static void
5843 insert_exception_resume_from_probe (struct thread_info *tp,
5844 const struct bound_probe *probe,
5845 struct frame_info *frame)
5846 {
5847 struct value *arg_value;
5848 CORE_ADDR handler;
5849 struct breakpoint *bp;
5850
5851 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5852 if (!arg_value)
5853 return;
5854
5855 handler = value_as_address (arg_value);
5856
5857 if (debug_infrun)
5858 fprintf_unfiltered (gdb_stdlog,
5859 "infrun: exception resume at %s\n",
5860 paddress (get_objfile_arch (probe->objfile),
5861 handler));
5862
5863 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5864 handler, bp_exception_resume);
5865 bp->thread = tp->num;
5866 inferior_thread ()->control.exception_resume_breakpoint = bp;
5867 }
5868
5869 /* This is called when an exception has been intercepted. Check to
5870 see whether the exception's destination is of interest, and if so,
5871 set an exception resume breakpoint there. */
5872
5873 static void
5874 check_exception_resume (struct execution_control_state *ecs,
5875 struct frame_info *frame)
5876 {
5877 volatile struct gdb_exception e;
5878 struct bound_probe probe;
5879 struct symbol *func;
5880
5881 /* First see if this exception unwinding breakpoint was set via a
5882 SystemTap probe point. If so, the probe has two arguments: the
5883 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5884 set a breakpoint there. */
5885 probe = find_probe_by_pc (get_frame_pc (frame));
5886 if (probe.probe)
5887 {
5888 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
5889 return;
5890 }
5891
5892 func = get_frame_function (frame);
5893 if (!func)
5894 return;
5895
5896 TRY_CATCH (e, RETURN_MASK_ERROR)
5897 {
5898 const struct block *b;
5899 struct block_iterator iter;
5900 struct symbol *sym;
5901 int argno = 0;
5902
5903 /* The exception breakpoint is a thread-specific breakpoint on
5904 the unwinder's debug hook, declared as:
5905
5906 void _Unwind_DebugHook (void *cfa, void *handler);
5907
5908 The CFA argument indicates the frame to which control is
5909 about to be transferred. HANDLER is the destination PC.
5910
5911 We ignore the CFA and set a temporary breakpoint at HANDLER.
5912 This is not extremely efficient but it avoids issues in gdb
5913 with computing the DWARF CFA, and it also works even in weird
5914 cases such as throwing an exception from inside a signal
5915 handler. */
5916
5917 b = SYMBOL_BLOCK_VALUE (func);
5918 ALL_BLOCK_SYMBOLS (b, iter, sym)
5919 {
5920 if (!SYMBOL_IS_ARGUMENT (sym))
5921 continue;
5922
5923 if (argno == 0)
5924 ++argno;
5925 else
5926 {
5927 insert_exception_resume_breakpoint (ecs->event_thread,
5928 b, frame, sym);
5929 break;
5930 }
5931 }
5932 }
5933 }
5934
5935 static void
5936 stop_waiting (struct execution_control_state *ecs)
5937 {
5938 if (debug_infrun)
5939 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
5940
5941 clear_step_over_info ();
5942
5943 /* Let callers know we don't want to wait for the inferior anymore. */
5944 ecs->wait_some_more = 0;
5945 }
5946
5947 /* Called when we should continue running the inferior, because the
5948 current event doesn't cause a user visible stop. This does the
5949 resuming part; waiting for the next event is done elsewhere. */
5950
5951 static void
5952 keep_going (struct execution_control_state *ecs)
5953 {
5954 /* Make sure normal_stop is called if we get a QUIT handled before
5955 reaching resume. */
5956 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5957
5958 /* Save the pc before execution, to compare with pc after stop. */
5959 ecs->event_thread->prev_pc
5960 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5961
5962 if (ecs->event_thread->control.trap_expected
5963 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5964 {
5965 /* We haven't yet gotten our trap, and either: intercepted a
5966 non-signal event (e.g., a fork); or took a signal which we
5967 are supposed to pass through to the inferior. Simply
5968 continue. */
5969 discard_cleanups (old_cleanups);
5970 resume (currently_stepping (ecs->event_thread),
5971 ecs->event_thread->suspend.stop_signal);
5972 }
5973 else
5974 {
5975 volatile struct gdb_exception e;
5976 struct regcache *regcache = get_current_regcache ();
5977 int remove_bp;
5978 int remove_wps;
5979
5980 /* Either the trap was not expected, but we are continuing
5981 anyway (if we got a signal, the user asked it be passed to
5982 the child)
5983 -- or --
5984 We got our expected trap, but decided we should resume from
5985 it.
5986
5987 We're going to run this baby now!
5988
5989 Note that insert_breakpoints won't try to re-insert
5990 already inserted breakpoints. Therefore, we don't
5991 care if breakpoints were already inserted, or not. */
5992
5993 /* If we need to step over a breakpoint, and we're not using
5994 displaced stepping to do so, insert all breakpoints
5995 (watchpoints, etc.) but the one we're stepping over, step one
5996 instruction, and then re-insert the breakpoint when that step
5997 is finished. */
5998
5999 remove_bp = (ecs->hit_singlestep_breakpoint
6000 || thread_still_needs_step_over (ecs->event_thread));
6001 remove_wps = (ecs->event_thread->stepping_over_watchpoint
6002 && !target_have_steppable_watchpoint);
6003
6004 if (remove_bp && !use_displaced_stepping (get_regcache_arch (regcache)))
6005 {
6006 set_step_over_info (get_regcache_aspace (regcache),
6007 regcache_read_pc (regcache), remove_wps);
6008 }
6009 else if (remove_wps)
6010 set_step_over_info (NULL, 0, remove_wps);
6011 else
6012 clear_step_over_info ();
6013
6014 /* Stop stepping if inserting breakpoints fails. */
6015 TRY_CATCH (e, RETURN_MASK_ERROR)
6016 {
6017 insert_breakpoints ();
6018 }
6019 if (e.reason < 0)
6020 {
6021 exception_print (gdb_stderr, e);
6022 stop_waiting (ecs);
6023 return;
6024 }
6025
6026 ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);
6027
6028 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
6029 explicitly specifies that such a signal should be delivered
6030 to the target program). Typically, that would occur when a
6031 user is debugging a target monitor on a simulator: the target
6032 monitor sets a breakpoint; the simulator encounters this
6033 breakpoint and halts the simulation handing control to GDB;
6034 GDB, noting that the stop address doesn't map to any known
6035 breakpoint, returns control back to the simulator; the
6036 simulator then delivers the hardware equivalent of a
6037 GDB_SIGNAL_TRAP to the program being debugged. */
6038 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
6039 && !signal_program[ecs->event_thread->suspend.stop_signal])
6040 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
6041
6042 discard_cleanups (old_cleanups);
6043 resume (currently_stepping (ecs->event_thread),
6044 ecs->event_thread->suspend.stop_signal);
6045 }
6046
6047 prepare_to_wait (ecs);
6048 }
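
/* Worked example of the step-over decision above (a sketch, not a
   complete description): if the thread must move past a breakpoint at
   the current PC and displaced stepping is not in use, remove_bp is
   set; set_step_over_info records that address so insert_breakpoints
   leaves that location alone, the thread is resumed with trap_expected
   set, and once that single step finishes a later pass through
   keep_going clears the step-over info, letting the breakpoint be
   re-inserted.  */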
6049
6050 /* This function normally comes after a resume, before
6051 handle_inferior_event exits. It takes care of any last bits of
6052 housekeeping, and sets the all-important wait_some_more flag. */
6053
6054 static void
6055 prepare_to_wait (struct execution_control_state *ecs)
6056 {
6057 if (debug_infrun)
6058 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
6059
6060 /* This is the old end of the while loop. Let everybody know we
6061 want to wait for the inferior some more and get called again
6062 soon. */
6063 ecs->wait_some_more = 1;
6064 }
6065
6066 /* We are done with the step range of a step/next/si/ni command.
6067 Called once for each n of a "step n" operation. */
6068
6069 static void
6070 end_stepping_range (struct execution_control_state *ecs)
6071 {
6072 ecs->event_thread->control.stop_step = 1;
6073 stop_waiting (ecs);
6074 }
6075
6076 /* Several print_*_reason functions to print why the inferior has stopped.
6077 We always print something when the inferior exits, or receives a signal.
6078 The rest of the cases are dealt with later on in normal_stop and
6079 print_it_typical. Ideally there should be a call to one of these
6080    print_*_reason functions from handle_inferior_event each time
6081 stop_waiting is called.
6082
6083 Note that we don't call these directly, instead we delegate that to
6084 the interpreters, through observers. Interpreters then call these
6085 with whatever uiout is right. */
6086
6087 void
6088 print_end_stepping_range_reason (struct ui_out *uiout)
6089 {
6090 /* For CLI-like interpreters, print nothing. */
6091
6092 if (ui_out_is_mi_like_p (uiout))
6093 {
6094 ui_out_field_string (uiout, "reason",
6095 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
6096 }
6097 }
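
/* For MI consumers the field above typically surfaces in the *stopped
   record, e.g. (assuming the usual MI rendering):

     *stopped,reason="end-stepping-range",...

   CLI-like interpreters print nothing here, as noted above.  */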
6098
6099 void
6100 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
6101 {
6102 annotate_signalled ();
6103 if (ui_out_is_mi_like_p (uiout))
6104 ui_out_field_string
6105 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
6106 ui_out_text (uiout, "\nProgram terminated with signal ");
6107 annotate_signal_name ();
6108 ui_out_field_string (uiout, "signal-name",
6109 gdb_signal_to_name (siggnal));
6110 annotate_signal_name_end ();
6111 ui_out_text (uiout, ", ");
6112 annotate_signal_string ();
6113 ui_out_field_string (uiout, "signal-meaning",
6114 gdb_signal_to_string (siggnal));
6115 annotate_signal_string_end ();
6116 ui_out_text (uiout, ".\n");
6117 ui_out_text (uiout, "The program no longer exists.\n");
6118 }
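
/* Putting the pieces above together, the CLI output reads roughly:

     Program terminated with signal SIGSEGV, Segmentation fault.
     The program no longer exists.

   with the signal name and meaning taken from SIGGNAL.  */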
6119
6120 void
6121 print_exited_reason (struct ui_out *uiout, int exitstatus)
6122 {
6123 struct inferior *inf = current_inferior ();
6124 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
6125
6126 annotate_exited (exitstatus);
6127 if (exitstatus)
6128 {
6129 if (ui_out_is_mi_like_p (uiout))
6130 ui_out_field_string (uiout, "reason",
6131 async_reason_lookup (EXEC_ASYNC_EXITED));
6132 ui_out_text (uiout, "[Inferior ");
6133 ui_out_text (uiout, plongest (inf->num));
6134 ui_out_text (uiout, " (");
6135 ui_out_text (uiout, pidstr);
6136 ui_out_text (uiout, ") exited with code ");
6137 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
6138 ui_out_text (uiout, "]\n");
6139 }
6140 else
6141 {
6142 if (ui_out_is_mi_like_p (uiout))
6143 ui_out_field_string
6144 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6145 ui_out_text (uiout, "[Inferior ");
6146 ui_out_text (uiout, plongest (inf->num));
6147 ui_out_text (uiout, " (");
6148 ui_out_text (uiout, pidstr);
6149 ui_out_text (uiout, ") exited normally]\n");
6150 }
6151 }
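
/* The resulting CLI output is along the lines of

     [Inferior 1 (process 1234) exited with code 01]
     [Inferior 1 (process 1234) exited normally]

   where the exact pid string comes from target_pid_to_str and a
   non-zero exit code is printed in octal.  */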
6152
6153 void
6154 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
6155 {
6156 annotate_signal ();
6157
6158 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
6159 {
6160 struct thread_info *t = inferior_thread ();
6161
6162 ui_out_text (uiout, "\n[");
6163 ui_out_field_string (uiout, "thread-name",
6164 target_pid_to_str (t->ptid));
6165 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
6166 ui_out_text (uiout, " stopped");
6167 }
6168 else
6169 {
6170 ui_out_text (uiout, "\nProgram received signal ");
6171 annotate_signal_name ();
6172 if (ui_out_is_mi_like_p (uiout))
6173 ui_out_field_string
6174 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
6175 ui_out_field_string (uiout, "signal-name",
6176 gdb_signal_to_name (siggnal));
6177 annotate_signal_name_end ();
6178 ui_out_text (uiout, ", ");
6179 annotate_signal_string ();
6180 ui_out_field_string (uiout, "signal-meaning",
6181 gdb_signal_to_string (siggnal));
6182 annotate_signal_string_end ();
6183 }
6184 ui_out_text (uiout, ".\n");
6185 }
6186
6187 void
6188 print_no_history_reason (struct ui_out *uiout)
6189 {
6190 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
6191 }
6192
6193 /* Print current location without a level number, if we have changed
6194 functions or hit a breakpoint. Print source line if we have one.
6195 bpstat_print contains the logic deciding in detail what to print,
6196 based on the event(s) that just occurred. */
6197
6198 void
6199 print_stop_event (struct target_waitstatus *ws)
6200 {
6201 int bpstat_ret;
6202 int source_flag;
6203 int do_frame_printing = 1;
6204 struct thread_info *tp = inferior_thread ();
6205
6206 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6207 switch (bpstat_ret)
6208 {
6209 case PRINT_UNKNOWN:
6210 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
6211 should) carry around the function and does (or should) use
6212 that when doing a frame comparison. */
6213 if (tp->control.stop_step
6214 && frame_id_eq (tp->control.step_frame_id,
6215 get_frame_id (get_current_frame ()))
6216 && step_start_function == find_pc_function (stop_pc))
6217 {
6218 /* Finished step, just print source line. */
6219 source_flag = SRC_LINE;
6220 }
6221 else
6222 {
6223 /* Print location and source line. */
6224 source_flag = SRC_AND_LOC;
6225 }
6226 break;
6227 case PRINT_SRC_AND_LOC:
6228 /* Print location and source line. */
6229 source_flag = SRC_AND_LOC;
6230 break;
6231 case PRINT_SRC_ONLY:
6232 source_flag = SRC_LINE;
6233 break;
6234 case PRINT_NOTHING:
6235 /* Something bogus. */
6236 source_flag = SRC_LINE;
6237 do_frame_printing = 0;
6238 break;
6239 default:
6240 internal_error (__FILE__, __LINE__, _("Unknown value."));
6241 }
6242
6243 /* The behavior of this routine with respect to the source
6244 flag is:
6245 SRC_LINE: Print only source line
6246 LOCATION: Print only location
6247 SRC_AND_LOC: Print location and source line. */
6248 if (do_frame_printing)
6249 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6250
6251 /* Display the auto-display expressions. */
6252 do_displays ();
6253 }
6254
6255 /* Here to return control to GDB when the inferior stops for real.
6256 Print appropriate messages, remove breakpoints, give terminal our modes.
6257
6258 STOP_PRINT_FRAME nonzero means print the executing frame
6259 (pc, function, args, file, line number and line text).
6260 BREAKPOINTS_FAILED nonzero means stop was due to error
6261 attempting to insert breakpoints. */
6262
6263 void
6264 normal_stop (void)
6265 {
6266 struct target_waitstatus last;
6267 ptid_t last_ptid;
6268 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6269
6270 get_last_target_status (&last_ptid, &last);
6271
6272 /* If an exception is thrown from this point on, make sure to
6273 propagate GDB's knowledge of the executing state to the
6274 frontend/user running state. A QUIT is an easy exception to see
6275 here, so do this before any filtered output. */
6276 if (!non_stop)
6277 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6278 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6279 && last.kind != TARGET_WAITKIND_EXITED
6280 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6281 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6282
6283 /* As we're presenting a stop, and potentially removing breakpoints,
6284 update the thread list so we can tell whether there are threads
6285 running on the target. With target remote, for example, we can
6286 only learn about new threads when we explicitly update the thread
6287 list. Do this before notifying the interpreters about signal
6288 stops, end of stepping ranges, etc., so that the "new thread"
6289 output is emitted before e.g., "Program received signal FOO",
6290 instead of after. */
6291 update_thread_list ();
6292
6293 if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
6294 observer_notify_signal_received (inferior_thread ()->suspend.stop_signal);
6295
6296 /* As with the notification of thread events, we want to delay
6297 notifying the user that we've switched thread context until
6298 the inferior actually stops.
6299
6300 There's no point in saying anything if the inferior has exited.
6301 Note that SIGNALLED here means "exited with a signal", not
6302 "received a signal".
6303
6304 Also skip saying anything in non-stop mode. In that mode, as we
6305 don't want GDB to switch threads behind the user's back, to avoid
6306 races where the user is typing a command to apply to thread x,
6307 but GDB switches to thread y before the user finishes entering
6308 the command, fetch_inferior_event installs a cleanup to restore
6309 the current thread back to the thread the user had selected right
6310 after this event is handled, so we're not really switching, only
6311 informing of a stop. */
6312 if (!non_stop
6313 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6314 && target_has_execution
6315 && last.kind != TARGET_WAITKIND_SIGNALLED
6316 && last.kind != TARGET_WAITKIND_EXITED
6317 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6318 {
6319 target_terminal_ours_for_output ();
6320 printf_filtered (_("[Switching to %s]\n"),
6321 target_pid_to_str (inferior_ptid));
6322 annotate_thread_changed ();
6323 previous_inferior_ptid = inferior_ptid;
6324 }
6325
6326 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6327 {
6328 gdb_assert (sync_execution || !target_can_async_p ());
6329
6330 target_terminal_ours_for_output ();
6331 printf_filtered (_("No unwaited-for children left.\n"));
6332 }
6333
6334 /* Note: this depends on the update_thread_list call above. */
6335 if (!breakpoints_should_be_inserted_now () && target_has_execution)
6336 {
6337 if (remove_breakpoints ())
6338 {
6339 target_terminal_ours_for_output ();
6340 printf_filtered (_("Cannot remove breakpoints because "
6341 "program is no longer writable.\nFurther "
6342 "execution is probably impossible.\n"));
6343 }
6344 }
6345
6346 /* If an auto-display called a function and that got a signal,
6347 delete that auto-display to avoid an infinite recursion. */
6348
6349 if (stopped_by_random_signal)
6350 disable_current_display ();
6351
6352 /* Notify observers if we finished a "step"-like command, etc. */
6353 if (target_has_execution
6354 && last.kind != TARGET_WAITKIND_SIGNALLED
6355 && last.kind != TARGET_WAITKIND_EXITED
6356 && inferior_thread ()->control.stop_step)
6357 {
6358 /* But not if in the middle of doing a "step n" operation for
6359 	 n > 1.  */
6360 if (inferior_thread ()->step_multi)
6361 goto done;
6362
6363 observer_notify_end_stepping_range ();
6364 }
6365
6366 target_terminal_ours ();
6367 async_enable_stdin ();
6368
6369 /* Set the current source location. This will also happen if we
6370 display the frame below, but the current SAL will be incorrect
6371 during a user hook-stop function. */
6372 if (has_stack_frames () && !stop_stack_dummy)
6373 set_current_sal_from_frame (get_current_frame ());
6374
6375 /* Let the user/frontend see the threads as stopped, but do nothing
6376 if the thread was running an infcall. We may be e.g., evaluating
6377 a breakpoint condition. In that case, the thread had state
6378 THREAD_RUNNING before the infcall, and shall remain set to
6379 running, all without informing the user/frontend about state
6380 transition changes. If this is actually a call command, then the
6381 thread was originally already stopped, so there's no state to
6382 finish either. */
6383 if (target_has_execution && inferior_thread ()->control.in_infcall)
6384 discard_cleanups (old_chain);
6385 else
6386 do_cleanups (old_chain);
6387
6388 /* Look up the hook_stop and run it (CLI internally handles problem
6389 of stop_command's pre-hook not existing). */
6390 if (stop_command)
6391 catch_errors (hook_stop_stub, stop_command,
6392 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6393
6394 if (!has_stack_frames ())
6395 goto done;
6396
6397 if (last.kind == TARGET_WAITKIND_SIGNALLED
6398 || last.kind == TARGET_WAITKIND_EXITED)
6399 goto done;
6400
6401 /* Select innermost stack frame - i.e., current frame is frame 0,
6402 and current location is based on that.
6403 Don't do this on return from a stack dummy routine,
6404 or if the program has exited. */
6405
6406 if (!stop_stack_dummy)
6407 {
6408 select_frame (get_current_frame ());
6409
6410 /* If --batch-silent is enabled then there's no need to print the current
6411 	 source location, and trying to print it risks causing an error message about
6412 missing source files. */
6413 if (stop_print_frame && !batch_silent)
6414 print_stop_event (&last);
6415 }
6416
6417 /* Save the function value return registers, if we care.
6418 We might be about to restore their previous contents. */
6419 if (inferior_thread ()->control.proceed_to_finish
6420 && execution_direction != EXEC_REVERSE)
6421 {
6422 /* This should not be necessary. */
6423 if (stop_registers)
6424 regcache_xfree (stop_registers);
6425
6426 /* NB: The copy goes through to the target picking up the value of
6427 all the registers. */
6428 stop_registers = regcache_dup (get_current_regcache ());
6429 }
6430
6431 if (stop_stack_dummy == STOP_STACK_DUMMY)
6432 {
6433 /* Pop the empty frame that contains the stack dummy.
6434 This also restores inferior state prior to the call
6435 (struct infcall_suspend_state). */
6436 struct frame_info *frame = get_current_frame ();
6437
6438 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6439 frame_pop (frame);
6440 /* frame_pop() calls reinit_frame_cache as the last thing it
6441 does which means there's currently no selected frame. We
6442 don't need to re-establish a selected frame if the dummy call
6443 returns normally, that will be done by
6444 restore_infcall_control_state. However, we do have to handle
6445 the case where the dummy call is returning after being
6446 stopped (e.g. the dummy call previously hit a breakpoint).
6447 We can't know which case we have so just always re-establish
6448 a selected frame here. */
6449 select_frame (get_current_frame ());
6450 }
6451
6452 done:
6453 annotate_stopped ();
6454
6455 /* Suppress the stop observer if we're in the middle of:
6456
6457      - a step n (n > 1), as there are still more steps to be done.
6458
6459 - a "finish" command, as the observer will be called in
6460 finish_command_continuation, so it can include the inferior
6461 function's return value.
6462
6463      - calling an inferior function, as we pretend the inferior didn't
6464 run at all. The return value of the call is handled by the
6465 expression evaluator, through call_function_by_hand. */
6466
6467 if (!target_has_execution
6468 || last.kind == TARGET_WAITKIND_SIGNALLED
6469 || last.kind == TARGET_WAITKIND_EXITED
6470 || last.kind == TARGET_WAITKIND_NO_RESUMED
6471 || (!(inferior_thread ()->step_multi
6472 && inferior_thread ()->control.stop_step)
6473 && !(inferior_thread ()->control.stop_bpstat
6474 && inferior_thread ()->control.proceed_to_finish)
6475 && !inferior_thread ()->control.in_infcall))
6476 {
6477 if (!ptid_equal (inferior_ptid, null_ptid))
6478 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6479 stop_print_frame);
6480 else
6481 observer_notify_normal_stop (NULL, stop_print_frame);
6482 }
6483
6484 if (target_has_execution)
6485 {
6486 if (last.kind != TARGET_WAITKIND_SIGNALLED
6487 && last.kind != TARGET_WAITKIND_EXITED)
6488 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6489 Delete any breakpoint that is to be deleted at the next stop. */
6490 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6491 }
6492
6493 /* Try to get rid of automatically added inferiors that are no
6494 longer needed. Keeping those around slows things down linearly.
6495 Note that this never removes the current inferior. */
6496 prune_inferiors ();
6497 }
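/* For reference: the hook_stop run near the top of normal_stop corresponds
   to a user-defined "hook-stop" command list (see the "stop" pseudo-command
   registered in _initialize_infrun below).  An illustrative definition at
   the GDB prompt, not taken from this file:

     (gdb) define hook-stop
     > backtrace 1
     > end

   Every command in that list is executed each time the inferior stops.  */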
6498
6499 static int
6500 hook_stop_stub (void *cmd)
6501 {
6502 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6503 return (0);
6504 }
6505 \f
6506 int
6507 signal_stop_state (int signo)
6508 {
6509 return signal_stop[signo];
6510 }
6511
6512 int
6513 signal_print_state (int signo)
6514 {
6515 return signal_print[signo];
6516 }
6517
6518 int
6519 signal_pass_state (int signo)
6520 {
6521 return signal_program[signo];
6522 }
6523
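/* Recompute the cached SIGNAL_PASS entry for SIGNO, or for every signal
   when SIGNO is -1.  A signal is passed straight through to the program
   only when GDB neither stops on it, prints it, blocks it from the
   program, nor catches it.  */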
6524 static void
6525 signal_cache_update (int signo)
6526 {
6527 if (signo == -1)
6528 {
6529 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6530 signal_cache_update (signo);
6531
6532 return;
6533 }
6534
6535 signal_pass[signo] = (signal_stop[signo] == 0
6536 && signal_print[signo] == 0
6537 && signal_program[signo] == 1
6538 && signal_catch[signo] == 0);
6539 }
6540
6541 int
6542 signal_stop_update (int signo, int state)
6543 {
6544 int ret = signal_stop[signo];
6545
6546 signal_stop[signo] = state;
6547 signal_cache_update (signo);
6548 return ret;
6549 }
6550
6551 int
6552 signal_print_update (int signo, int state)
6553 {
6554 int ret = signal_print[signo];
6555
6556 signal_print[signo] = state;
6557 signal_cache_update (signo);
6558 return ret;
6559 }
6560
6561 int
6562 signal_pass_update (int signo, int state)
6563 {
6564 int ret = signal_program[signo];
6565
6566 signal_program[signo] = state;
6567 signal_cache_update (signo);
6568 return ret;
6569 }
6570
6571 /* Update the global 'signal_catch' from INFO and notify the
6572 target. */
6573
6574 void
6575 signal_catch_update (const unsigned int *info)
6576 {
6577 int i;
6578
6579 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6580 signal_catch[i] = info[i] > 0;
6581 signal_cache_update (-1);
6582 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6583 }
6584
6585 static void
6586 sig_print_header (void)
6587 {
6588 printf_filtered (_("Signal Stop\tPrint\tPass "
6589 "to program\tDescription\n"));
6590 }
6591
6592 static void
6593 sig_print_info (enum gdb_signal oursig)
6594 {
6595 const char *name = gdb_signal_to_name (oursig);
6596 int name_padding = 13 - strlen (name);
6597
6598 if (name_padding <= 0)
6599 name_padding = 0;
6600
6601 printf_filtered ("%s", name);
6602 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6603 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6604 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6605 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6606 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6607 }
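/* With the default settings established in _initialize_infrun below, the
   two routines above produce output along these lines (illustrative):

     Signal        Stop	Print	Pass to program	Description
     SIGINT        Yes	Yes	No		Interrupt
     SIGALRM       No	No	Yes		Alarm clock
   */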
6608
6609 /* Specify how various signals in the inferior should be handled. */
6610
6611 static void
6612 handle_command (char *args, int from_tty)
6613 {
6614 char **argv;
6615 int digits, wordlen;
6616 int sigfirst, signum, siglast;
6617 enum gdb_signal oursig;
6618 int allsigs;
6619 int nsigs;
6620 unsigned char *sigs;
6621 struct cleanup *old_chain;
6622
6623 if (args == NULL)
6624 {
6625 error_no_arg (_("signal to handle"));
6626 }
6627
6628 /* Allocate and zero an array of flags for which signals to handle. */
6629
6630 nsigs = (int) GDB_SIGNAL_LAST;
6631 sigs = (unsigned char *) alloca (nsigs);
6632 memset (sigs, 0, nsigs);
6633
6634 /* Break the command line up into args. */
6635
6636 argv = gdb_buildargv (args);
6637 old_chain = make_cleanup_freeargv (argv);
6638
6639 /* Walk through the args, looking for signal oursigs, signal names, and
6640 actions. Signal numbers and signal names may be interspersed with
6641 actions, with the actions being performed for all signals cumulatively
6642 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
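/* Illustrative invocations (not from this file; see also the "handle"
   help text in _initialize_infrun below):

     handle SIGUSR1 nostop noprint pass
     handle 1-5 print
     handle all nostop

   Each action word applies cumulatively to every signal named so far on
   the command line.  */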
6643
6644 while (*argv != NULL)
6645 {
6646 wordlen = strlen (*argv);
6647 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6648 {;
6649 }
6650 allsigs = 0;
6651 sigfirst = siglast = -1;
6652
6653 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6654 {
6655 /* Apply action to all signals except those used by the
6656 debugger. Silently skip those. */
6657 allsigs = 1;
6658 sigfirst = 0;
6659 siglast = nsigs - 1;
6660 }
6661 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6662 {
6663 SET_SIGS (nsigs, sigs, signal_stop);
6664 SET_SIGS (nsigs, sigs, signal_print);
6665 }
6666 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6667 {
6668 UNSET_SIGS (nsigs, sigs, signal_program);
6669 }
6670 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6671 {
6672 SET_SIGS (nsigs, sigs, signal_print);
6673 }
6674 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6675 {
6676 SET_SIGS (nsigs, sigs, signal_program);
6677 }
6678 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6679 {
6680 UNSET_SIGS (nsigs, sigs, signal_stop);
6681 }
6682 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6683 {
6684 SET_SIGS (nsigs, sigs, signal_program);
6685 }
6686 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6687 {
6688 UNSET_SIGS (nsigs, sigs, signal_print);
6689 UNSET_SIGS (nsigs, sigs, signal_stop);
6690 }
6691 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6692 {
6693 UNSET_SIGS (nsigs, sigs, signal_program);
6694 }
6695 else if (digits > 0)
6696 {
6697 /* It is numeric. The numeric signal refers to our own
6698 internal signal numbering from target.h, not to a host/target
6699 signal number. This is a feature; users really should be
6700 using symbolic names anyway, and the common ones like
6701 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6702
6703 sigfirst = siglast = (int)
6704 gdb_signal_from_command (atoi (*argv));
6705 if ((*argv)[digits] == '-')
6706 {
6707 siglast = (int)
6708 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6709 }
6710 if (sigfirst > siglast)
6711 {
6712 /* The range was given in reverse order; swap the bounds. */
6713 signum = sigfirst;
6714 sigfirst = siglast;
6715 siglast = signum;
6716 }
6717 }
6718 else
6719 {
6720 oursig = gdb_signal_from_name (*argv);
6721 if (oursig != GDB_SIGNAL_UNKNOWN)
6722 {
6723 sigfirst = siglast = (int) oursig;
6724 }
6725 else
6726 {
6727 /* Not a number and not a recognized flag word => complain. */
6728 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6729 }
6730 }
6731
6732 /* If any signal numbers or symbol names were found, set flags for
6733 which signals to apply actions to. */
6734
6735 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6736 {
6737 switch ((enum gdb_signal) signum)
6738 {
6739 case GDB_SIGNAL_TRAP:
6740 case GDB_SIGNAL_INT:
6741 if (!allsigs && !sigs[signum])
6742 {
6743 if (query (_("%s is used by the debugger.\n\
6744 Are you sure you want to change it? "),
6745 gdb_signal_to_name ((enum gdb_signal) signum)))
6746 {
6747 sigs[signum] = 1;
6748 }
6749 else
6750 {
6751 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6752 gdb_flush (gdb_stdout);
6753 }
6754 }
6755 break;
6756 case GDB_SIGNAL_0:
6757 case GDB_SIGNAL_DEFAULT:
6758 case GDB_SIGNAL_UNKNOWN:
6759 /* Make sure that "all" doesn't print these. */
6760 break;
6761 default:
6762 sigs[signum] = 1;
6763 break;
6764 }
6765 }
6766
6767 argv++;
6768 }
6769
6770 for (signum = 0; signum < nsigs; signum++)
6771 if (sigs[signum])
6772 {
6773 signal_cache_update (-1);
6774 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6775 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6776
6777 if (from_tty)
6778 {
6779 /* Show the results. */
6780 sig_print_header ();
6781 for (; signum < nsigs; signum++)
6782 if (sigs[signum])
6783 sig_print_info (signum);
6784 }
6785
6786 break;
6787 }
6788
6789 do_cleanups (old_chain);
6790 }
6791
6792 /* Complete the "handle" command. */
6793
6794 static VEC (char_ptr) *
6795 handle_completer (struct cmd_list_element *ignore,
6796 const char *text, const char *word)
6797 {
6798 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6799 static const char * const keywords[] =
6800 {
6801 "all",
6802 "stop",
6803 "ignore",
6804 "print",
6805 "pass",
6806 "nostop",
6807 "noignore",
6808 "noprint",
6809 "nopass",
6810 NULL,
6811 };
6812
6813 vec_signals = signal_completer (ignore, text, word);
6814 vec_keywords = complete_on_enum (keywords, word, word);
6815
6816 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6817 VEC_free (char_ptr, vec_signals);
6818 VEC_free (char_ptr, vec_keywords);
6819 return return_val;
6820 }
6821
6822 static void
6823 xdb_handle_command (char *args, int from_tty)
6824 {
6825 char **argv;
6826 struct cleanup *old_chain;
6827
6828 if (args == NULL)
6829 error_no_arg (_("xdb command"));
6830
6831 /* Break the command line up into args. */
6832
6833 argv = gdb_buildargv (args);
6834 old_chain = make_cleanup_freeargv (argv);
6835 if (argv[1] != (char *) NULL)
6836 {
6837 char *argBuf;
6838 int bufLen;
6839
6840 bufLen = strlen (argv[0]) + 20;
6841 argBuf = (char *) xmalloc (bufLen);
6842 if (argBuf)
6843 {
6844 int validFlag = 1;
6845 enum gdb_signal oursig;
6846
6847 oursig = gdb_signal_from_name (argv[0]);
6848 memset (argBuf, 0, bufLen);
6849 if (strcmp (argv[1], "Q") == 0)
6850 sprintf (argBuf, "%s %s", argv[0], "noprint");
6851 else
6852 {
6853 if (strcmp (argv[1], "s") == 0)
6854 {
6855 if (!signal_stop[oursig])
6856 sprintf (argBuf, "%s %s", argv[0], "stop");
6857 else
6858 sprintf (argBuf, "%s %s", argv[0], "nostop");
6859 }
6860 else if (strcmp (argv[1], "i") == 0)
6861 {
6862 if (!signal_program[oursig])
6863 sprintf (argBuf, "%s %s", argv[0], "pass");
6864 else
6865 sprintf (argBuf, "%s %s", argv[0], "nopass");
6866 }
6867 else if (strcmp (argv[1], "r") == 0)
6868 {
6869 if (!signal_print[oursig])
6870 sprintf (argBuf, "%s %s", argv[0], "print");
6871 else
6872 sprintf (argBuf, "%s %s", argv[0], "noprint");
6873 }
6874 else
6875 validFlag = 0;
6876 }
6877 if (validFlag)
6878 handle_command (argBuf, from_tty);
6879 else
6880 printf_filtered (_("Invalid signal handling flag.\n"));
6881 if (argBuf)
6882 xfree (argBuf);
6883 }
6884 }
6885 do_cleanups (old_chain);
6886 }
6887
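/* Map a bare number given to commands such as "handle" or "info signals"
   onto GDB's internal signal numbering.  Only 1-15 are accepted; for that
   range the internal numbering is assumed to line up with the traditional
   Unix signal numbers (see the comment in handle_command above).  */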
6888 enum gdb_signal
6889 gdb_signal_from_command (int num)
6890 {
6891 if (num >= 1 && num <= 15)
6892 return (enum gdb_signal) num;
6893 error (_("Only signals 1-15 are valid as numeric signals.\n\
6894 Use \"info signals\" for a list of symbolic signals."));
6895 }
6896
6897 /* Print current contents of the tables set by the handle command.
6898 It is possible we should just be printing signals actually used
6899 by the current target (but for things to work right when switching
6900 targets, all signals should be in the signal tables). */
6901
6902 static void
6903 signals_info (char *signum_exp, int from_tty)
6904 {
6905 enum gdb_signal oursig;
6906
6907 sig_print_header ();
6908
6909 if (signum_exp)
6910 {
6911 /* First see if this is a symbol name. */
6912 oursig = gdb_signal_from_name (signum_exp);
6913 if (oursig == GDB_SIGNAL_UNKNOWN)
6914 {
6915 /* No, try numeric. */
6916 oursig =
6917 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6918 }
6919 sig_print_info (oursig);
6920 return;
6921 }
6922
6923 printf_filtered ("\n");
6924 /* These ugly casts brought to you by the native VAX compiler. */
6925 for (oursig = GDB_SIGNAL_FIRST;
6926 (int) oursig < (int) GDB_SIGNAL_LAST;
6927 oursig = (enum gdb_signal) ((int) oursig + 1))
6928 {
6929 QUIT;
6930
6931 if (oursig != GDB_SIGNAL_UNKNOWN
6932 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6933 sig_print_info (oursig);
6934 }
6935
6936 printf_filtered (_("\nUse the \"handle\" command "
6937 "to change these tables.\n"));
6938 }
6939
6940 /* Check if it makes sense to read $_siginfo from the current thread
6941 at this point. If not, throw an error. */
6942
6943 static void
6944 validate_siginfo_access (void)
6945 {
6946 /* No current inferior, no siginfo. */
6947 if (ptid_equal (inferior_ptid, null_ptid))
6948 error (_("No thread selected."));
6949
6950 /* Don't try to read from a dead thread. */
6951 if (is_exited (inferior_ptid))
6952 error (_("The current thread has terminated"));
6953
6954 /* ... or from a spinning thread. */
6955 if (is_running (inferior_ptid))
6956 error (_("Selected thread is running."));
6957 }
6958
6959 /* The $_siginfo convenience variable is a bit special. We don't know
6960 for sure the type of the value until we actually have a chance to
6961 fetch the data. The type can change depending on gdbarch, so it is
6962 also dependent on which thread you have selected.  We handle this by:
6963
6964 1. making $_siginfo be an internalvar that creates a new value on
6965 access.
6966
6967 2. making the value of $_siginfo be an lval_computed value. */
6968
6969 /* This function implements the lval_computed support for reading a
6970 $_siginfo value. */
6971
6972 static void
6973 siginfo_value_read (struct value *v)
6974 {
6975 LONGEST transferred;
6976
6977 validate_siginfo_access ();
6978
6979 transferred =
6980 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6981 NULL,
6982 value_contents_all_raw (v),
6983 value_offset (v),
6984 TYPE_LENGTH (value_type (v)));
6985
6986 if (transferred != TYPE_LENGTH (value_type (v)))
6987 error (_("Unable to read siginfo"));
6988 }
6989
6990 /* This function implements the lval_computed support for writing a
6991 $_siginfo value. */
6992
6993 static void
6994 siginfo_value_write (struct value *v, struct value *fromval)
6995 {
6996 LONGEST transferred;
6997
6998 validate_siginfo_access ();
6999
7000 transferred = target_write (&current_target,
7001 TARGET_OBJECT_SIGNAL_INFO,
7002 NULL,
7003 value_contents_all_raw (fromval),
7004 value_offset (v),
7005 TYPE_LENGTH (value_type (fromval)));
7006
7007 if (transferred != TYPE_LENGTH (value_type (fromval)))
7008 error (_("Unable to write siginfo"));
7009 }
7010
7011 static const struct lval_funcs siginfo_value_funcs =
7012 {
7013 siginfo_value_read,
7014 siginfo_value_write
7015 };
7016
7017 /* Return a new value with the correct type for the siginfo object of
7018 the current thread using architecture GDBARCH. Return a void value
7019 if there's no object available. */
7020
7021 static struct value *
7022 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
7023 void *ignore)
7024 {
7025 if (target_has_stack
7026 && !ptid_equal (inferior_ptid, null_ptid)
7027 && gdbarch_get_siginfo_type_p (gdbarch))
7028 {
7029 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7030
7031 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
7032 }
7033
7034 return allocate_value (builtin_type (gdbarch)->builtin_void);
7035 }
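/* Illustrative CLI usage, assuming a live thread on a gdbarch that
   provides a siginfo type:

     (gdb) print $_siginfo.si_signo
     (gdb) set $_siginfo.si_signo = 11

   Reads are serviced by siginfo_value_read and writes by
   siginfo_value_write above; the variable itself is registered in
   _initialize_infrun via create_internalvar_type_lazy.  */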
7036
7037 \f
7038 /* infcall_suspend_state contains state about the program itself like its
7039 registers and any signal it received when it last stopped.
7040 This state must be restored regardless of how the inferior function call
7041 ends (either successfully, or after it hits a breakpoint or signal)
7042 if the program is to properly continue where it left off. */
7043
7044 struct infcall_suspend_state
7045 {
7046 struct thread_suspend_state thread_suspend;
7047 #if 0 /* Currently unused and empty structures are not valid C. */
7048 struct inferior_suspend_state inferior_suspend;
7049 #endif
7050
7051 /* Other fields: */
7052 CORE_ADDR stop_pc;
7053 struct regcache *registers;
7054
7055 /* Format of SIGINFO_DATA or NULL if it is not present. */
7056 struct gdbarch *siginfo_gdbarch;
7057
7058 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
7059 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different gdbarch the
7060 content would be invalid. */
7061 gdb_byte *siginfo_data;
7062 };
7063
7064 struct infcall_suspend_state *
7065 save_infcall_suspend_state (void)
7066 {
7067 struct infcall_suspend_state *inf_state;
7068 struct thread_info *tp = inferior_thread ();
7069 #if 0
7070 struct inferior *inf = current_inferior ();
7071 #endif
7072 struct regcache *regcache = get_current_regcache ();
7073 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7074 gdb_byte *siginfo_data = NULL;
7075
7076 if (gdbarch_get_siginfo_type_p (gdbarch))
7077 {
7078 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7079 size_t len = TYPE_LENGTH (type);
7080 struct cleanup *back_to;
7081
7082 siginfo_data = xmalloc (len);
7083 back_to = make_cleanup (xfree, siginfo_data);
7084
7085 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
7086 siginfo_data, 0, len) == len)
7087 discard_cleanups (back_to);
7088 else
7089 {
7090 /* Errors ignored. */
7091 do_cleanups (back_to);
7092 siginfo_data = NULL;
7093 }
7094 }
7095
7096 inf_state = XCNEW (struct infcall_suspend_state);
7097
7098 if (siginfo_data)
7099 {
7100 inf_state->siginfo_gdbarch = gdbarch;
7101 inf_state->siginfo_data = siginfo_data;
7102 }
7103
7104 inf_state->thread_suspend = tp->suspend;
7105 #if 0 /* Currently unused and empty structures are not valid C. */
7106 inf_state->inferior_suspend = inf->suspend;
7107 #endif
7108
7109 /* run_inferior_call will not use the signal due to its `proceed' call with
7110 GDB_SIGNAL_0 anyway. */
7111 tp->suspend.stop_signal = GDB_SIGNAL_0;
7112
7113 inf_state->stop_pc = stop_pc;
7114
7115 inf_state->registers = regcache_dup (regcache);
7116
7117 return inf_state;
7118 }
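/* A minimal usage sketch for this API (hypothetical caller, not code
   from this file):

     struct infcall_suspend_state *state = save_infcall_suspend_state ();
     struct cleanup *chain
       = make_cleanup_restore_infcall_suspend_state (state);

     ... run the inferior call; an error thrown here restores STATE
     through the cleanup ...

     On the normal path, restore STATE explicitly (restoring also frees it):

     discard_cleanups (chain);
     restore_infcall_suspend_state (state);
   */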
7119
7120 /* Restore inferior session state to INF_STATE. */
7121
7122 void
7123 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7124 {
7125 struct thread_info *tp = inferior_thread ();
7126 #if 0
7127 struct inferior *inf = current_inferior ();
7128 #endif
7129 struct regcache *regcache = get_current_regcache ();
7130 struct gdbarch *gdbarch = get_regcache_arch (regcache);
7131
7132 tp->suspend = inf_state->thread_suspend;
7133 #if 0 /* Currently unused and empty structures are not valid C. */
7134 inf->suspend = inf_state->inferior_suspend;
7135 #endif
7136
7137 stop_pc = inf_state->stop_pc;
7138
7139 if (inf_state->siginfo_gdbarch == gdbarch)
7140 {
7141 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7142
7143 /* Errors ignored. */
7144 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
7145 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
7146 }
7147
7148 /* The inferior can be gone if the user types "print exit(0)"
7149 (and perhaps other times). */
7150 if (target_has_execution)
7151 /* NB: The register write goes through to the target. */
7152 regcache_cpy (regcache, inf_state->registers);
7153
7154 discard_infcall_suspend_state (inf_state);
7155 }
7156
7157 static void
7158 do_restore_infcall_suspend_state_cleanup (void *state)
7159 {
7160 restore_infcall_suspend_state (state);
7161 }
7162
7163 struct cleanup *
7164 make_cleanup_restore_infcall_suspend_state
7165 (struct infcall_suspend_state *inf_state)
7166 {
7167 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
7168 }
7169
7170 void
7171 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7172 {
7173 regcache_xfree (inf_state->registers);
7174 xfree (inf_state->siginfo_data);
7175 xfree (inf_state);
7176 }
7177
7178 struct regcache *
7179 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
7180 {
7181 return inf_state->registers;
7182 }
7183
7184 /* infcall_control_state contains state regarding gdb's control of the
7185 inferior itself like stepping control. It also contains session state like
7186 the user's currently selected frame. */
7187
7188 struct infcall_control_state
7189 {
7190 struct thread_control_state thread_control;
7191 struct inferior_control_state inferior_control;
7192
7193 /* Other fields: */
7194 enum stop_stack_kind stop_stack_dummy;
7195 int stopped_by_random_signal;
7196 int stop_after_trap;
7197
7198 /* ID of the selected frame when the inferior function call was made. */
7199 struct frame_id selected_frame_id;
7200 };
7201
7202 /* Save all of the information associated with the inferior<==>gdb
7203 connection. */
7204
7205 struct infcall_control_state *
7206 save_infcall_control_state (void)
7207 {
7208 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
7209 struct thread_info *tp = inferior_thread ();
7210 struct inferior *inf = current_inferior ();
7211
7212 inf_status->thread_control = tp->control;
7213 inf_status->inferior_control = inf->control;
7214
7215 tp->control.step_resume_breakpoint = NULL;
7216 tp->control.exception_resume_breakpoint = NULL;
7217
7218 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
7219 chain. If caller's caller is walking the chain, they'll be happier if we
7220 hand them back the original chain when restore_infcall_control_state is
7221 called. */
7222 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7223
7224 /* Other fields: */
7225 inf_status->stop_stack_dummy = stop_stack_dummy;
7226 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7227 inf_status->stop_after_trap = stop_after_trap;
7228
7229 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7230
7231 return inf_status;
7232 }
7233
7234 static int
7235 restore_selected_frame (void *args)
7236 {
7237 struct frame_id *fid = (struct frame_id *) args;
7238 struct frame_info *frame;
7239
7240 frame = frame_find_by_id (*fid);
7241
7242 /* If frame_find_by_id fails, the frame that was previously selected
7243 no longer exists in the current stack. */
7244 if (frame == NULL)
7245 {
7246 warning (_("Unable to restore previously selected frame."));
7247 return 0;
7248 }
7249
7250 select_frame (frame);
7251
7252 return (1);
7253 }
7254
7255 /* Restore inferior session state to INF_STATUS. */
7256
7257 void
7258 restore_infcall_control_state (struct infcall_control_state *inf_status)
7259 {
7260 struct thread_info *tp = inferior_thread ();
7261 struct inferior *inf = current_inferior ();
7262
7263 if (tp->control.step_resume_breakpoint)
7264 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7265
7266 if (tp->control.exception_resume_breakpoint)
7267 tp->control.exception_resume_breakpoint->disposition
7268 = disp_del_at_next_stop;
7269
7270 /* Handle the bpstat_copy of the chain. */
7271 bpstat_clear (&tp->control.stop_bpstat);
7272
7273 tp->control = inf_status->thread_control;
7274 inf->control = inf_status->inferior_control;
7275
7276 /* Other fields: */
7277 stop_stack_dummy = inf_status->stop_stack_dummy;
7278 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7279 stop_after_trap = inf_status->stop_after_trap;
7280
7281 if (target_has_stack)
7282 {
7283 /* The point of catch_errors is that if the stack is clobbered,
7284 walking the stack might encounter a garbage pointer and
7285 error() trying to dereference it. */
7286 if (catch_errors
7287 (restore_selected_frame, &inf_status->selected_frame_id,
7288 "Unable to restore previously selected frame:\n",
7289 RETURN_MASK_ERROR) == 0)
7290 /* Error in restoring the selected frame. Select the innermost
7291 frame. */
7292 select_frame (get_current_frame ());
7293 }
7294
7295 xfree (inf_status);
7296 }
7297
7298 static void
7299 do_restore_infcall_control_state_cleanup (void *sts)
7300 {
7301 restore_infcall_control_state (sts);
7302 }
7303
7304 struct cleanup *
7305 make_cleanup_restore_infcall_control_state
7306 (struct infcall_control_state *inf_status)
7307 {
7308 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7309 }
7310
7311 void
7312 discard_infcall_control_state (struct infcall_control_state *inf_status)
7313 {
7314 if (inf_status->thread_control.step_resume_breakpoint)
7315 inf_status->thread_control.step_resume_breakpoint->disposition
7316 = disp_del_at_next_stop;
7317
7318 if (inf_status->thread_control.exception_resume_breakpoint)
7319 inf_status->thread_control.exception_resume_breakpoint->disposition
7320 = disp_del_at_next_stop;
7321
7322 /* See save_infcall_control_state for info on stop_bpstat. */
7323 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7324
7325 xfree (inf_status);
7326 }
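/* The control state follows the same save / restore-or-discard pattern as
   infcall_suspend_state above: restore_infcall_control_state copies the
   saved stepping state and bpstat back into the current thread, while
   discard_infcall_control_state keeps the current state and only releases
   the saved copy, scheduling its saved step-resume breakpoints for
   deletion at the next stop.  Both free INF_STATUS.  */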
7327 \f
7328 /* restore_inferior_ptid() will be used by the cleanup machinery
7329 to restore the inferior_ptid value saved in a call to
7330 save_inferior_ptid(). */
7331
7332 static void
7333 restore_inferior_ptid (void *arg)
7334 {
7335 ptid_t *saved_ptid_ptr = arg;
7336
7337 inferior_ptid = *saved_ptid_ptr;
7338 xfree (arg);
7339 }
7340
7341 /* Save the value of inferior_ptid so that it may be restored by a
7342 later call to do_cleanups(). Returns the struct cleanup pointer
7343 needed for later doing the cleanup. */
7344
7345 struct cleanup *
7346 save_inferior_ptid (void)
7347 {
7348 ptid_t *saved_ptid_ptr;
7349
7350 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7351 *saved_ptid_ptr = inferior_ptid;
7352 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7353 }
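/* Typical usage (a minimal sketch; other_ptid is a placeholder):

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = other_ptid;	/* Temporarily switch threads.  */
     ...
     do_cleanups (old_chain);		/* Restores the saved value.  */
   */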
7354
7355 /* See infrun.h. */
7356
7357 void
7358 clear_exit_convenience_vars (void)
7359 {
7360 clear_internalvar (lookup_internalvar ("_exitsignal"));
7361 clear_internalvar (lookup_internalvar ("_exitcode"));
7362 }
7363 \f
7364
7365 /* User interface for reverse debugging:
7366 Set exec-direction / show exec-direction commands
7367 (error unless the target supports reverse execution). */
7368
7369 int execution_direction = EXEC_FORWARD;
7370 static const char exec_forward[] = "forward";
7371 static const char exec_reverse[] = "reverse";
7372 static const char *exec_direction = exec_forward;
7373 static const char *const exec_direction_names[] = {
7374 exec_forward,
7375 exec_reverse,
7376 NULL
7377 };
7378
7379 static void
7380 set_exec_direction_func (char *args, int from_tty,
7381 struct cmd_list_element *cmd)
7382 {
7383 if (target_can_execute_reverse)
7384 {
7385 if (!strcmp (exec_direction, exec_forward))
7386 execution_direction = EXEC_FORWARD;
7387 else if (!strcmp (exec_direction, exec_reverse))
7388 execution_direction = EXEC_REVERSE;
7389 }
7390 else
7391 {
7392 exec_direction = exec_forward;
7393 error (_("Target does not support this operation."));
7394 }
7395 }
7396
7397 static void
7398 show_exec_direction_func (struct ui_file *out, int from_tty,
7399 struct cmd_list_element *cmd, const char *value)
7400 {
7401 switch (execution_direction) {
7402 case EXEC_FORWARD:
7403 fprintf_filtered (out, _("Forward.\n"));
7404 break;
7405 case EXEC_REVERSE:
7406 fprintf_filtered (out, _("Reverse.\n"));
7407 break;
7408 default:
7409 internal_error (__FILE__, __LINE__,
7410 _("bogus execution_direction value: %d"),
7411 (int) execution_direction);
7412 }
7413 }
7414
7415 static void
7416 show_schedule_multiple (struct ui_file *file, int from_tty,
7417 struct cmd_list_element *c, const char *value)
7418 {
7419 fprintf_filtered (file, _("Resuming the execution of threads "
7420 "of all processes is %s.\n"), value);
7421 }
7422
7423 /* Implementation of `siginfo' variable. */
7424
7425 static const struct internalvar_funcs siginfo_funcs =
7426 {
7427 siginfo_make_value,
7428 NULL,
7429 NULL
7430 };
7431
7432 void
7433 _initialize_infrun (void)
7434 {
7435 int i;
7436 int numsigs;
7437 struct cmd_list_element *c;
7438
7439 add_info ("signals", signals_info, _("\
7440 What debugger does when program gets various signals.\n\
7441 Specify a signal as argument to print info on that signal only."));
7442 add_info_alias ("handle", "signals", 0);
7443
7444 c = add_com ("handle", class_run, handle_command, _("\
7445 Specify how to handle signals.\n\
7446 Usage: handle SIGNAL [ACTIONS]\n\
7447 Args are signals and actions to apply to those signals.\n\
7448 If no actions are specified, the current settings for the specified signals\n\
7449 will be displayed instead.\n\
7450 \n\
7451 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7452 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7453 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7454 The special arg \"all\" is recognized to mean all signals except those\n\
7455 used by the debugger, typically SIGTRAP and SIGINT.\n\
7456 \n\
7457 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7458 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7459 Stop means reenter debugger if this signal happens (implies print).\n\
7460 Print means print a message if this signal happens.\n\
7461 Pass means let program see this signal; otherwise program doesn't know.\n\
7462 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7463 Pass and Stop may be combined.\n\
7464 \n\
7465 Multiple signals may be specified. Signal numbers and signal names\n\
7466 may be interspersed with actions, with the actions being performed for\n\
7467 all signals cumulatively specified."));
7468 set_cmd_completer (c, handle_completer);
7469
7470 if (xdb_commands)
7471 {
7472 add_com ("lz", class_info, signals_info, _("\
7473 What debugger does when program gets various signals.\n\
7474 Specify a signal as argument to print info on that signal only."));
7475 add_com ("z", class_run, xdb_handle_command, _("\
7476 Specify how to handle a signal.\n\
7477 Args are signals and actions to apply to those signals.\n\
7478 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7479 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7480 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7481 The special arg \"all\" is recognized to mean all signals except those\n\
7482 used by the debugger, typically SIGTRAP and SIGINT.\n\
7483 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7484 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7485 nopass), \"Q\" (noprint)\n\
7486 Stop means reenter debugger if this signal happens (implies print).\n\
7487 Print means print a message if this signal happens.\n\
7488 Pass means let program see this signal; otherwise program doesn't know.\n\
7489 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7490 Pass and Stop may be combined."));
7491 }
7492
7493 if (!dbx_commands)
7494 stop_command = add_cmd ("stop", class_obscure,
7495 not_just_help_class_command, _("\
7496 There is no `stop' command, but you can set a hook on `stop'.\n\
7497 This allows you to set a list of commands to be run each time execution\n\
7498 of the program stops."), &cmdlist);
7499
7500 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7501 Set inferior debugging."), _("\
7502 Show inferior debugging."), _("\
7503 When non-zero, inferior specific debugging is enabled."),
7504 NULL,
7505 show_debug_infrun,
7506 &setdebuglist, &showdebuglist);
7507
7508 add_setshow_boolean_cmd ("displaced", class_maintenance,
7509 &debug_displaced, _("\
7510 Set displaced stepping debugging."), _("\
7511 Show displaced stepping debugging."), _("\
7512 When non-zero, displaced stepping specific debugging is enabled."),
7513 NULL,
7514 show_debug_displaced,
7515 &setdebuglist, &showdebuglist);
7516
7517 add_setshow_boolean_cmd ("non-stop", no_class,
7518 &non_stop_1, _("\
7519 Set whether gdb controls the inferior in non-stop mode."), _("\
7520 Show whether gdb controls the inferior in non-stop mode."), _("\
7521 When debugging a multi-threaded program and this setting is\n\
7522 off (the default, also called all-stop mode), when one thread stops\n\
7523 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7524 all other threads in the program while you interact with the thread of\n\
7525 interest. When you continue or step a thread, you can allow the other\n\
7526 threads to run, or have them remain stopped, but while you inspect any\n\
7527 thread's state, all threads stop.\n\
7528 \n\
7529 In non-stop mode, when one thread stops, other threads can continue\n\
7530 to run freely. You'll be able to step each thread independently,\n\
7531 leave it stopped or free to run as needed."),
7532 set_non_stop,
7533 show_non_stop,
7534 &setlist,
7535 &showlist);
7536
7537 numsigs = (int) GDB_SIGNAL_LAST;
7538 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7539 signal_print = (unsigned char *)
7540 xmalloc (sizeof (signal_print[0]) * numsigs);
7541 signal_program = (unsigned char *)
7542 xmalloc (sizeof (signal_program[0]) * numsigs);
7543 signal_catch = (unsigned char *)
7544 xmalloc (sizeof (signal_catch[0]) * numsigs);
7545 signal_pass = (unsigned char *)
7546 xmalloc (sizeof (signal_pass[0]) * numsigs);
7547 for (i = 0; i < numsigs; i++)
7548 {
7549 signal_stop[i] = 1;
7550 signal_print[i] = 1;
7551 signal_program[i] = 1;
7552 signal_catch[i] = 0;
7553 }
7554
7555 /* Signals caused by debugger's own actions
7556 should not be given to the program afterwards. */
7557 signal_program[GDB_SIGNAL_TRAP] = 0;
7558 signal_program[GDB_SIGNAL_INT] = 0;
7559
7560 /* Signals that are not errors should not normally enter the debugger. */
7561 signal_stop[GDB_SIGNAL_ALRM] = 0;
7562 signal_print[GDB_SIGNAL_ALRM] = 0;
7563 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7564 signal_print[GDB_SIGNAL_VTALRM] = 0;
7565 signal_stop[GDB_SIGNAL_PROF] = 0;
7566 signal_print[GDB_SIGNAL_PROF] = 0;
7567 signal_stop[GDB_SIGNAL_CHLD] = 0;
7568 signal_print[GDB_SIGNAL_CHLD] = 0;
7569 signal_stop[GDB_SIGNAL_IO] = 0;
7570 signal_print[GDB_SIGNAL_IO] = 0;
7571 signal_stop[GDB_SIGNAL_POLL] = 0;
7572 signal_print[GDB_SIGNAL_POLL] = 0;
7573 signal_stop[GDB_SIGNAL_URG] = 0;
7574 signal_print[GDB_SIGNAL_URG] = 0;
7575 signal_stop[GDB_SIGNAL_WINCH] = 0;
7576 signal_print[GDB_SIGNAL_WINCH] = 0;
7577 signal_stop[GDB_SIGNAL_PRIO] = 0;
7578 signal_print[GDB_SIGNAL_PRIO] = 0;
7579
7580 /* These signals are used internally by user-level thread
7581 implementations. (See signal(5) on Solaris.) Like the above
7582 signals, a healthy program receives and handles them as part of
7583 its normal operation. */
7584 signal_stop[GDB_SIGNAL_LWP] = 0;
7585 signal_print[GDB_SIGNAL_LWP] = 0;
7586 signal_stop[GDB_SIGNAL_WAITING] = 0;
7587 signal_print[GDB_SIGNAL_WAITING] = 0;
7588 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7589 signal_print[GDB_SIGNAL_CANCEL] = 0;
7590
7591 /* Update cached state. */
7592 signal_cache_update (-1);
7593
7594 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7595 &stop_on_solib_events, _("\
7596 Set stopping for shared library events."), _("\
7597 Show stopping for shared library events."), _("\
7598 If nonzero, gdb will give control to the user when the dynamic linker\n\
7599 notifies gdb of shared library events. The most common event of interest\n\
7600 to the user would be loading/unloading of a new library."),
7601 set_stop_on_solib_events,
7602 show_stop_on_solib_events,
7603 &setlist, &showlist);
7604
7605 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7606 follow_fork_mode_kind_names,
7607 &follow_fork_mode_string, _("\
7608 Set debugger response to a program call of fork or vfork."), _("\
7609 Show debugger response to a program call of fork or vfork."), _("\
7610 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7611 parent - the original process is debugged after a fork\n\
7612 child - the new process is debugged after a fork\n\
7613 The unfollowed process will continue to run.\n\
7614 By default, the debugger will follow the parent process."),
7615 NULL,
7616 show_follow_fork_mode_string,
7617 &setlist, &showlist);
7618
7619 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7620 follow_exec_mode_names,
7621 &follow_exec_mode_string, _("\
7622 Set debugger response to a program call of exec."), _("\
7623 Show debugger response to a program call of exec."), _("\
7624 An exec call replaces the program image of a process.\n\
7625 \n\
7626 follow-exec-mode can be:\n\
7627 \n\
7628 new - the debugger creates a new inferior and rebinds the process\n\
7629 to this new inferior. The program the process was running before\n\
7630 the exec call can be restarted afterwards by restarting the original\n\
7631 inferior.\n\
7632 \n\
7633 same - the debugger keeps the process bound to the same inferior.\n\
7634 The new executable image replaces the previous executable loaded in\n\
7635 the inferior. Restarting the inferior after the exec call restarts\n\
7636 the executable the process was running after the exec call.\n\
7637 \n\
7638 By default, the debugger will use the same inferior."),
7639 NULL,
7640 show_follow_exec_mode_string,
7641 &setlist, &showlist);
7642
7643 add_setshow_enum_cmd ("scheduler-locking", class_run,
7644 scheduler_enums, &scheduler_mode, _("\
7645 Set mode for locking scheduler during execution."), _("\
7646 Show mode for locking scheduler during execution."), _("\
7647 off == no locking (threads may preempt at any time)\n\
7648 on == full locking (no thread except the current thread may run)\n\
7649 step == scheduler locked during every single-step operation.\n\
7650 In this mode, no other thread may run during a step command.\n\
7651 Other threads may run while stepping over a function call ('next')."),
7652 set_schedlock_func, /* traps on target vector */
7653 show_scheduler_mode,
7654 &setlist, &showlist);
7655
7656 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7657 Set mode for resuming threads of all processes."), _("\
7658 Show mode for resuming threads of all processes."), _("\
7659 When on, execution commands (such as 'continue' or 'next') resume all\n\
7660 threads of all processes. When off (which is the default), execution\n\
7661 commands only resume the threads of the current process. The set of\n\
7662 threads that are resumed is further refined by the scheduler-locking\n\
7663 mode (see help set scheduler-locking)."),
7664 NULL,
7665 show_schedule_multiple,
7666 &setlist, &showlist);
7667
7668 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7669 Set mode of the step operation."), _("\
7670 Show mode of the step operation."), _("\
7671 When set, doing a step over a function without debug line information\n\
7672 will stop at the first instruction of that function. Otherwise, the\n\
7673 function is skipped and the step command stops at a different source line."),
7674 NULL,
7675 show_step_stop_if_no_debug,
7676 &setlist, &showlist);
7677
7678 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7679 &can_use_displaced_stepping, _("\
7680 Set debugger's willingness to use displaced stepping."), _("\
7681 Show debugger's willingness to use displaced stepping."), _("\
7682 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7683 supported by the target architecture. If off, gdb will not use displaced\n\
7684 stepping to step over breakpoints, even if such is supported by the target\n\
7685 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7686 if the target architecture supports it and non-stop mode is active, but will not\n\
7687 use it in all-stop mode (see help set non-stop)."),
7688 NULL,
7689 show_can_use_displaced_stepping,
7690 &setlist, &showlist);
7691
7692 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7693 &exec_direction, _("Set direction of execution.\n\
7694 Options are 'forward' or 'reverse'."),
7695 _("Show direction of execution (forward/reverse)."),
7696 _("Tells gdb whether to execute forward or backward."),
7697 set_exec_direction_func, show_exec_direction_func,
7698 &setlist, &showlist);
7699
7700 /* Set/show detach-on-fork: user-settable mode. */
7701
7702 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7703 Set whether gdb will detach the child of a fork."), _("\
7704 Show whether gdb will detach the child of a fork."), _("\
7705 Tells gdb whether to detach the child of a fork."),
7706 NULL, NULL, &setlist, &showlist);
7707
7708 /* Set/show disable address space randomization mode. */
7709
7710 add_setshow_boolean_cmd ("disable-randomization", class_support,
7711 &disable_randomization, _("\
7712 Set disabling of debuggee's virtual address space randomization."), _("\
7713 Show disabling of debuggee's virtual address space randomization."), _("\
7714 When this mode is on (which is the default), randomization of the virtual\n\
7715 address space is disabled. Standalone programs run with the randomization\n\
7716 enabled by default on some platforms."),
7717 &set_disable_randomization,
7718 &show_disable_randomization,
7719 &setlist, &showlist);
7720
7721 /* ptid initializations */
7722 inferior_ptid = null_ptid;
7723 target_last_wait_ptid = minus_one_ptid;
7724
7725 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7726 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7727 observer_attach_thread_exit (infrun_thread_thread_exit);
7728 observer_attach_inferior_exit (infrun_inferior_exit);
7729
7730 /* Explicitly create without lookup, since lookup would try to create
7731 a void-typed value, and when we get here, gdbarch
7732 isn't initialized yet. At this point, we're quite sure there
7733 isn't another convenience variable of the same name. */
7734 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7735
7736 add_setshow_boolean_cmd ("observer", no_class,
7737 &observer_mode_1, _("\
7738 Set whether gdb controls the inferior in observer mode."), _("\
7739 Show whether gdb controls the inferior in observer mode."), _("\
7740 In observer mode, GDB can get data from the inferior, but not\n\
7741 affect its execution. Registers and memory may not be changed,\n\
7742 breakpoints may not be set, and the program cannot be interrupted\n\
7743 or signalled."),
7744 set_observer_mode,
7745 show_observer_mode,
7746 &setlist,
7747 &showlist);
7748 }