gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include <string.h>
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "exceptions.h"
28 #include "breakpoint.h"
29 #include "gdb_wait.h"
30 #include "gdbcore.h"
31 #include "gdbcmd.h"
32 #include "cli/cli-script.h"
33 #include "target.h"
34 #include "gdbthread.h"
35 #include "annotate.h"
36 #include "symfile.h"
37 #include "top.h"
38 #include <signal.h>
39 #include "inf-loop.h"
40 #include "regcache.h"
41 #include "value.h"
42 #include "observer.h"
43 #include "language.h"
44 #include "solib.h"
45 #include "main.h"
46 #include "dictionary.h"
47 #include "block.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "record-full.h"
53 #include "inline-frame.h"
54 #include "jit.h"
55 #include "tracepoint.h"
56 #include "continuations.h"
57 #include "interps.h"
58 #include "skip.h"
59 #include "probe.h"
60 #include "objfiles.h"
61 #include "completer.h"
62 #include "target-descriptions.h"
63 #include "target-dcache.h"
64
65 /* Prototypes for local functions */
66
67 static void signals_info (char *, int);
68
69 static void handle_command (char *, int);
70
71 static void sig_print_info (enum gdb_signal);
72
73 static void sig_print_header (void);
74
75 static void resume_cleanups (void *);
76
77 static int hook_stop_stub (void *);
78
79 static int restore_selected_frame (void *);
80
81 static int follow_fork (void);
82
83 static void set_schedlock_func (char *args, int from_tty,
84 struct cmd_list_element *c);
85
86 static int currently_stepping (struct thread_info *tp);
87
88 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
89 void *data);
90
91 static void xdb_handle_command (char *args, int from_tty);
92
93 static void print_exited_reason (int exitstatus);
94
95 static void print_signal_exited_reason (enum gdb_signal siggnal);
96
97 static void print_no_history_reason (void);
98
99 static void print_signal_received_reason (enum gdb_signal siggnal);
100
101 static void print_end_stepping_range_reason (void);
102
103 void _initialize_infrun (void);
104
105 void nullify_last_target_wait_ptid (void);
106
107 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
108
109 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
110
111 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
112
113 /* When set, stop the 'step' command if we enter a function which has
114 no line number information. The normal behavior is that we step
115 over such functions. */
116 int step_stop_if_no_debug = 0;
117 static void
118 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
119 struct cmd_list_element *c, const char *value)
120 {
121 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
122 }
123
124 /* In asynchronous mode, but simulating synchronous execution. */
125
126 int sync_execution = 0;
127
128 /* proceed and normal_stop use this to notify the user when the
129 inferior stopped in a different thread than it had been running
130 in. */
131
132 static ptid_t previous_inferior_ptid;
133
134 /* If set (default for legacy reasons), when following a fork, GDB
135 will detach from one of the fork branches, child or parent.
136 Exactly which branch is detached depends on 'set follow-fork-mode'
137 setting. */
138
139 static int detach_fork = 1;
140
141 int debug_displaced = 0;
142 static void
143 show_debug_displaced (struct ui_file *file, int from_tty,
144 struct cmd_list_element *c, const char *value)
145 {
146 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
147 }
148
149 unsigned int debug_infrun = 0;
150 static void
151 show_debug_infrun (struct ui_file *file, int from_tty,
152 struct cmd_list_element *c, const char *value)
153 {
154 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
155 }
156
157
158 /* Support for disabling address space randomization. */
159
160 int disable_randomization = 1;
161
162 static void
163 show_disable_randomization (struct ui_file *file, int from_tty,
164 struct cmd_list_element *c, const char *value)
165 {
166 if (target_supports_disable_randomization ())
167 fprintf_filtered (file,
168 _("Disabling randomization of debuggee's "
169 "virtual address space is %s.\n"),
170 value);
171 else
172 fputs_filtered (_("Disabling randomization of debuggee's "
173 "virtual address space is unsupported on\n"
174 "this platform.\n"), file);
175 }
176
177 static void
178 set_disable_randomization (char *args, int from_tty,
179 struct cmd_list_element *c)
180 {
181 if (!target_supports_disable_randomization ())
182 error (_("Disabling randomization of debuggee's "
183 "virtual address space is unsupported on\n"
184 "this platform."));
185 }
186
187 /* User interface for non-stop mode. */
188
189 int non_stop = 0;
190 static int non_stop_1 = 0;
191
192 static void
193 set_non_stop (char *args, int from_tty,
194 struct cmd_list_element *c)
195 {
196 if (target_has_execution)
197 {
198 non_stop_1 = non_stop;
199 error (_("Cannot change this setting while the inferior is running."));
200 }
201
202 non_stop = non_stop_1;
203 }
204
205 static void
206 show_non_stop (struct ui_file *file, int from_tty,
207 struct cmd_list_element *c, const char *value)
208 {
209 fprintf_filtered (file,
210 _("Controlling the inferior in non-stop mode is %s.\n"),
211 value);
212 }
213
214 /* "Observer mode" is somewhat like a more extreme version of
215 non-stop, in which all GDB operations that might affect the
216 target's execution have been disabled. */
217
218 int observer_mode = 0;
219 static int observer_mode_1 = 0;
220
221 static void
222 set_observer_mode (char *args, int from_tty,
223 struct cmd_list_element *c)
224 {
225 if (target_has_execution)
226 {
227 observer_mode_1 = observer_mode;
228 error (_("Cannot change this setting while the inferior is running."));
229 }
230
231 observer_mode = observer_mode_1;
232
233 may_write_registers = !observer_mode;
234 may_write_memory = !observer_mode;
235 may_insert_breakpoints = !observer_mode;
236 may_insert_tracepoints = !observer_mode;
237 /* We can insert fast tracepoints in or out of observer mode,
238 but enable them if we're going into this mode. */
239 if (observer_mode)
240 may_insert_fast_tracepoints = 1;
241 may_stop = !observer_mode;
242 update_target_permissions ();
243
244 /* Going *into* observer mode we must force non-stop, then
245 going out we leave it that way. */
246 if (observer_mode)
247 {
248 target_async_permitted = 1;
249 pagination_enabled = 0;
250 non_stop = non_stop_1 = 1;
251 }
252
253 if (from_tty)
254 printf_filtered (_("Observer mode is now %s.\n"),
255 (observer_mode ? "on" : "off"));
256 }
257
258 static void
259 show_observer_mode (struct ui_file *file, int from_tty,
260 struct cmd_list_element *c, const char *value)
261 {
262 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
263 }
264
265 /* This updates the value of observer mode based on changes in
266 permissions. Note that we are deliberately ignoring the values of
267 may-write-registers and may-write-memory, since the user may have
268 reason to enable these during a session, for instance to turn on a
269 debugging-related global. */
270
271 void
272 update_observer_mode (void)
273 {
274 int newval;
275
276 newval = (!may_insert_breakpoints
277 && !may_insert_tracepoints
278 && may_insert_fast_tracepoints
279 && !may_stop
280 && non_stop);
281
282 /* Let the user know if things change. */
283 if (newval != observer_mode)
284 printf_filtered (_("Observer mode is now %s.\n"),
285 (newval ? "on" : "off"));
286
287 observer_mode = observer_mode_1 = newval;
288 }
289
290 /* Tables of how to react to signals; the user sets them. */
291
292 static unsigned char *signal_stop;
293 static unsigned char *signal_print;
294 static unsigned char *signal_program;
295
296 /* Table of signals that are registered with "catch signal". A
297 non-zero entry indicates that the signal is caught by some "catch
298 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
299 signals. */
300 static unsigned char *signal_catch;
301
302 /* Table of signals that the target may silently handle.
303 This is automatically determined from the flags above,
304 and simply cached here. */
305 static unsigned char *signal_pass;
306
307 #define SET_SIGS(nsigs,sigs,flags) \
308 do { \
309 int signum = (nsigs); \
310 while (signum-- > 0) \
311 if ((sigs)[signum]) \
312 (flags)[signum] = 1; \
313 } while (0)
314
315 #define UNSET_SIGS(nsigs,sigs,flags) \
316 do { \
317 int signum = (nsigs); \
318 while (signum-- > 0) \
319 if ((sigs)[signum]) \
320 (flags)[signum] = 0; \
321 } while (0)
322
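/* For illustration only: a hedged sketch of how the SET_SIGS/UNSET_SIGS
   helpers above are typically applied to the per-signal tables.  The
   `sigs' work array here is hypothetical (in GDB proper a comparable
   array is built by the "handle" command from its arguments), and the
   sketch is compiled out rather than part of the real control flow.  */
#if 0
static void
example_mark_sigint_stop_print_nopass (void)
{
  int nsigs = (int) GDB_SIGNAL_LAST;
  unsigned char *sigs = xcalloc (nsigs, sizeof (*sigs));

  sigs[GDB_SIGNAL_INT] = 1;		/* Select just SIGINT.  */

  SET_SIGS (nsigs, sigs, signal_stop);	    /* "stop" implies ...  */
  SET_SIGS (nsigs, sigs, signal_print);	    /* ... "print" as well.  */
  UNSET_SIGS (nsigs, sigs, signal_program); /* And "nopass".  */

  xfree (sigs);
}
#endif
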
323 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
324 this function is to avoid exporting `signal_program'. */
325
326 void
327 update_signals_program_target (void)
328 {
329 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
330 }
331
332 /* Value to pass to target_resume() to cause all threads to resume. */
333
334 #define RESUME_ALL minus_one_ptid
335
336 /* Command list pointer for the "stop" placeholder. */
337
338 static struct cmd_list_element *stop_command;
339
340 /* Function the inferior was in as of the last step command. */
341
342 static struct symbol *step_start_function;
343
344 /* Nonzero if we want to give control to the user when we're notified
345 of shared library events by the dynamic linker. */
346 int stop_on_solib_events;
347
348 /* Enable or disable optional shared library event breakpoints
349 as appropriate when the above flag is changed. */
350
351 static void
352 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
353 {
354 update_solib_breakpoints ();
355 }
356
357 static void
358 show_stop_on_solib_events (struct ui_file *file, int from_tty,
359 struct cmd_list_element *c, const char *value)
360 {
361 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
362 value);
363 }
364
365 /* Nonzero means we are expecting a trace trap
366 and should stop the inferior and return silently when it happens. */
367
368 int stop_after_trap;
369
370 /* Save register contents here when executing a "finish" command or when
371 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
372 Thus this contains the return value from the called function (assuming
373 values are returned in a register). */
374
375 struct regcache *stop_registers;
376
377 /* Nonzero after stop if current stack frame should be printed. */
378
379 static int stop_print_frame;
380
381 /* This is a cached copy of the pid/waitstatus of the last event
382 returned by target_wait()/deprecated_target_wait_hook(). This
383 information is returned by get_last_target_status(). */
384 static ptid_t target_last_wait_ptid;
385 static struct target_waitstatus target_last_waitstatus;
386
387 static void context_switch (ptid_t ptid);
388
389 void init_thread_stepping_state (struct thread_info *tss);
390
391 static void init_infwait_state (void);
392
393 static const char follow_fork_mode_child[] = "child";
394 static const char follow_fork_mode_parent[] = "parent";
395
396 static const char *const follow_fork_mode_kind_names[] = {
397 follow_fork_mode_child,
398 follow_fork_mode_parent,
399 NULL
400 };
401
402 static const char *follow_fork_mode_string = follow_fork_mode_parent;
403 static void
404 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
405 struct cmd_list_element *c, const char *value)
406 {
407 fprintf_filtered (file,
408 _("Debugger response to a program "
409 "call of fork or vfork is \"%s\".\n"),
410 value);
411 }
412 \f
413
414 /* Tell the target to follow the fork we're stopped at. Returns true
415 if the inferior should be resumed; false, if the target for some
416 reason decided it's best not to resume. */
417
418 static int
419 follow_fork (void)
420 {
421 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
422 int should_resume = 1;
423 struct thread_info *tp;
424
425 /* Copy user stepping state to the new inferior thread. FIXME: the
426 followed fork child thread should have a copy of most of the
427 parent thread structure's run control related fields, not just these.
428 Initialized to avoid "may be used uninitialized" warnings from gcc. */
429 struct breakpoint *step_resume_breakpoint = NULL;
430 struct breakpoint *exception_resume_breakpoint = NULL;
431 CORE_ADDR step_range_start = 0;
432 CORE_ADDR step_range_end = 0;
433 struct frame_id step_frame_id = { 0 };
434
435 if (!non_stop)
436 {
437 ptid_t wait_ptid;
438 struct target_waitstatus wait_status;
439
440 /* Get the last target status returned by target_wait(). */
441 get_last_target_status (&wait_ptid, &wait_status);
442
443 /* If not stopped at a fork event, then there's nothing else to
444 do. */
445 if (wait_status.kind != TARGET_WAITKIND_FORKED
446 && wait_status.kind != TARGET_WAITKIND_VFORKED)
447 return 1;
448
449 /* Check if we switched over from WAIT_PTID, since the event was
450 reported. */
451 if (!ptid_equal (wait_ptid, minus_one_ptid)
452 && !ptid_equal (inferior_ptid, wait_ptid))
453 {
454 /* We did. Switch back to WAIT_PTID thread, to tell the
455 target to follow it (in either direction). We'll
456 afterwards refuse to resume, and inform the user what
457 happened. */
458 switch_to_thread (wait_ptid);
459 should_resume = 0;
460 }
461 }
462
463 tp = inferior_thread ();
464
465 /* If there were any forks/vforks that were caught and are now to be
466 followed, then do so now. */
467 switch (tp->pending_follow.kind)
468 {
469 case TARGET_WAITKIND_FORKED:
470 case TARGET_WAITKIND_VFORKED:
471 {
472 ptid_t parent, child;
473
474 /* If the user did a next/step, etc, over a fork call,
475 preserve the stepping state in the fork child. */
476 if (follow_child && should_resume)
477 {
478 step_resume_breakpoint = clone_momentary_breakpoint
479 (tp->control.step_resume_breakpoint);
480 step_range_start = tp->control.step_range_start;
481 step_range_end = tp->control.step_range_end;
482 step_frame_id = tp->control.step_frame_id;
483 exception_resume_breakpoint
484 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
485
486 /* For now, delete the parent's sr breakpoint, otherwise,
487 parent/child sr breakpoints are considered duplicates,
488 and the child version will not be installed. Remove
489 this when the breakpoints module becomes aware of
490 inferiors and address spaces. */
491 delete_step_resume_breakpoint (tp);
492 tp->control.step_range_start = 0;
493 tp->control.step_range_end = 0;
494 tp->control.step_frame_id = null_frame_id;
495 delete_exception_resume_breakpoint (tp);
496 }
497
498 parent = inferior_ptid;
499 child = tp->pending_follow.value.related_pid;
500
501 /* Tell the target to do whatever is necessary to follow
502 either parent or child. */
503 if (target_follow_fork (follow_child, detach_fork))
504 {
505 /* Target refused to follow, or there's some other reason
506 we shouldn't resume. */
507 should_resume = 0;
508 }
509 else
510 {
511 /* This pending follow fork event is now handled, one way
512 or another. The previously selected thread may be gone
513 from the lists by now, but if it is still around, we need
514 to clear the pending follow request. */
515 tp = find_thread_ptid (parent);
516 if (tp)
517 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
518
519 /* This makes sure we don't try to apply the "Switched
520 over from WAIT_PTID" logic above. */
521 nullify_last_target_wait_ptid ();
522
523 /* If we followed the child, switch to it... */
524 if (follow_child)
525 {
526 switch_to_thread (child);
527
528 /* ... and preserve the stepping state, in case the
529 user was stepping over the fork call. */
530 if (should_resume)
531 {
532 tp = inferior_thread ();
533 tp->control.step_resume_breakpoint
534 = step_resume_breakpoint;
535 tp->control.step_range_start = step_range_start;
536 tp->control.step_range_end = step_range_end;
537 tp->control.step_frame_id = step_frame_id;
538 tp->control.exception_resume_breakpoint
539 = exception_resume_breakpoint;
540 }
541 else
542 {
543 /* If we get here, it was because we're trying to
544 resume from a fork catchpoint, but, the user
545 has switched threads away from the thread that
546 forked. In that case, the resume command
547 issued is most likely not applicable to the
548 child, so just warn, and refuse to resume. */
549 warning (_("Not resuming: switched threads "
550 "before following fork child.\n"));
551 }
552
553 /* Reset breakpoints in the child as appropriate. */
554 follow_inferior_reset_breakpoints ();
555 }
556 else
557 switch_to_thread (parent);
558 }
559 }
560 break;
561 case TARGET_WAITKIND_SPURIOUS:
562 /* Nothing to follow. */
563 break;
564 default:
565 internal_error (__FILE__, __LINE__,
566 "Unexpected pending_follow.kind %d\n",
567 tp->pending_follow.kind);
568 break;
569 }
570
571 return should_resume;
572 }
573
574 void
575 follow_inferior_reset_breakpoints (void)
576 {
577 struct thread_info *tp = inferior_thread ();
578
579 /* Was there a step_resume breakpoint? (There was if the user
580 did a "next" at the fork() call.) If so, explicitly reset its
581 thread number.
582
583 step_resumes are a form of bp that are made to be per-thread.
584 Since we created the step_resume bp when the parent process
585 was being debugged, and now are switching to the child process,
586 from the breakpoint package's viewpoint, that's a switch of
587 "threads". We must update the bp's notion of which thread
588 it is for, or it'll be ignored when it triggers. */
589
590 if (tp->control.step_resume_breakpoint)
591 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
592
593 if (tp->control.exception_resume_breakpoint)
594 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
595
596 /* Reinsert all breakpoints in the child. The user may have set
597 breakpoints after catching the fork, in which case those
598 were never set in the child, but only in the parent. This makes
599 sure the inserted breakpoints match the breakpoint list. */
600
601 breakpoint_re_set ();
602 insert_breakpoints ();
603 }
604
605 /* The child has exited or execed: resume the threads of the parent
606 that the user wanted to be executing. */
607
608 static int
609 proceed_after_vfork_done (struct thread_info *thread,
610 void *arg)
611 {
612 int pid = * (int *) arg;
613
614 if (ptid_get_pid (thread->ptid) == pid
615 && is_running (thread->ptid)
616 && !is_executing (thread->ptid)
617 && !thread->stop_requested
618 && thread->suspend.stop_signal == GDB_SIGNAL_0)
619 {
620 if (debug_infrun)
621 fprintf_unfiltered (gdb_stdlog,
622 "infrun: resuming vfork parent thread %s\n",
623 target_pid_to_str (thread->ptid));
624
625 switch_to_thread (thread->ptid);
626 clear_proceed_status ();
627 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
628 }
629
630 return 0;
631 }
632
633 /* Called whenever we notice an exec or exit event, to handle
634 detaching or resuming a vfork parent. */
635
636 static void
637 handle_vfork_child_exec_or_exit (int exec)
638 {
639 struct inferior *inf = current_inferior ();
640
641 if (inf->vfork_parent)
642 {
643 int resume_parent = -1;
644
645 /* This exec or exit marks the end of the shared memory region
646 between the parent and the child. If the user wanted to
647 detach from the parent, now is the time. */
648
649 if (inf->vfork_parent->pending_detach)
650 {
651 struct thread_info *tp;
652 struct cleanup *old_chain;
653 struct program_space *pspace;
654 struct address_space *aspace;
655
656 /* follow-fork child, detach-on-fork on. */
657
658 inf->vfork_parent->pending_detach = 0;
659
660 if (!exec)
661 {
662 /* If we're handling a child exit, then inferior_ptid
663 points at the inferior's pid, not to a thread. */
664 old_chain = save_inferior_ptid ();
665 save_current_program_space ();
666 save_current_inferior ();
667 }
668 else
669 old_chain = save_current_space_and_thread ();
670
671 /* We're letting go of the parent. */
672 tp = any_live_thread_of_process (inf->vfork_parent->pid);
673 switch_to_thread (tp->ptid);
674
675 /* We're about to detach from the parent, which implicitly
676 removes breakpoints from its address space. There's a
677 catch here: we want to reuse the spaces for the child,
678 but, parent/child are still sharing the pspace at this
679 point, although the exec in reality makes the kernel give
680 the child a fresh set of new pages. The problem here is
681 that the breakpoints module, being unaware of this, would
682 likely choose the child process to write to the parent
683 address space. Swapping the child temporarily away from
684 the spaces has the desired effect. Yes, this is "sort
685 of" a hack. */
686
687 pspace = inf->pspace;
688 aspace = inf->aspace;
689 inf->aspace = NULL;
690 inf->pspace = NULL;
691
692 if (debug_infrun || info_verbose)
693 {
694 target_terminal_ours ();
695
696 if (exec)
697 fprintf_filtered (gdb_stdlog,
698 "Detaching vfork parent process "
699 "%d after child exec.\n",
700 inf->vfork_parent->pid);
701 else
702 fprintf_filtered (gdb_stdlog,
703 "Detaching vfork parent process "
704 "%d after child exit.\n",
705 inf->vfork_parent->pid);
706 }
707
708 target_detach (NULL, 0);
709
710 /* Put it back. */
711 inf->pspace = pspace;
712 inf->aspace = aspace;
713
714 do_cleanups (old_chain);
715 }
716 else if (exec)
717 {
718 /* We're staying attached to the parent, so, really give the
719 child a new address space. */
720 inf->pspace = add_program_space (maybe_new_address_space ());
721 inf->aspace = inf->pspace->aspace;
722 inf->removable = 1;
723 set_current_program_space (inf->pspace);
724
725 resume_parent = inf->vfork_parent->pid;
726
727 /* Break the bonds. */
728 inf->vfork_parent->vfork_child = NULL;
729 }
730 else
731 {
732 struct cleanup *old_chain;
733 struct program_space *pspace;
734
735 /* If this is a vfork child exiting, then the pspace and
736 aspaces were shared with the parent. Since we're
737 reporting the process exit, we'll be mourning all that is
738 found in the address space, and switching to null_ptid,
739 preparing to start a new inferior. But, since we don't
740 want to clobber the parent's address/program spaces, we
741 go ahead and create a new one for this exiting
742 inferior. */
743
744 /* Switch to null_ptid, so that clone_program_space doesn't want
745 to read the selected frame of a dead process. */
746 old_chain = save_inferior_ptid ();
747 inferior_ptid = null_ptid;
748
749 /* This inferior is dead, so avoid giving the breakpoints
750 module the option to write through to it (cloning a
751 program space resets breakpoints). */
752 inf->aspace = NULL;
753 inf->pspace = NULL;
754 pspace = add_program_space (maybe_new_address_space ());
755 set_current_program_space (pspace);
756 inf->removable = 1;
757 inf->symfile_flags = SYMFILE_NO_READ;
758 clone_program_space (pspace, inf->vfork_parent->pspace);
759 inf->pspace = pspace;
760 inf->aspace = pspace->aspace;
761
762 /* Put back inferior_ptid. We'll continue mourning this
763 inferior. */
764 do_cleanups (old_chain);
765
766 resume_parent = inf->vfork_parent->pid;
767 /* Break the bonds. */
768 inf->vfork_parent->vfork_child = NULL;
769 }
770
771 inf->vfork_parent = NULL;
772
773 gdb_assert (current_program_space == inf->pspace);
774
775 if (non_stop && resume_parent != -1)
776 {
777 /* If the user wanted the parent to be running, let it go
778 free now. */
779 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
780
781 if (debug_infrun)
782 fprintf_unfiltered (gdb_stdlog,
783 "infrun: resuming vfork parent process %d\n",
784 resume_parent);
785
786 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
787
788 do_cleanups (old_chain);
789 }
790 }
791 }
792
793 /* Enum strings for "set|show follow-exec-mode". */
794
795 static const char follow_exec_mode_new[] = "new";
796 static const char follow_exec_mode_same[] = "same";
797 static const char *const follow_exec_mode_names[] =
798 {
799 follow_exec_mode_new,
800 follow_exec_mode_same,
801 NULL,
802 };
803
804 static const char *follow_exec_mode_string = follow_exec_mode_same;
805 static void
806 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
807 struct cmd_list_element *c, const char *value)
808 {
809 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
810 }
811
812 /* EXECD_PATHNAME is assumed to be non-NULL. */
813
814 static void
815 follow_exec (ptid_t pid, char *execd_pathname)
816 {
817 struct thread_info *th = inferior_thread ();
818 struct inferior *inf = current_inferior ();
819
820 /* This is an exec event that we actually wish to pay attention to.
821 Refresh our symbol table to the newly exec'd program, remove any
822 momentary bp's, etc.
823
824 If there are breakpoints, they aren't really inserted now,
825 since the exec() transformed our inferior into a fresh set
826 of instructions.
827
828 We want to preserve symbolic breakpoints on the list, since
829 we have hopes that they can be reset after the new a.out's
830 symbol table is read.
831
832 However, any "raw" breakpoints must be removed from the list
833 (e.g., the solib bp's), since their address is probably invalid
834 now.
835
836 And, we DON'T want to call delete_breakpoints() here, since
837 that may write the bp's "shadow contents" (the instruction
838 value that was overwritten with a TRAP instruction). Since
839 we now have a new a.out, those shadow contents aren't valid. */
840
841 mark_breakpoints_out ();
842
843 update_breakpoints_after_exec ();
844
845 /* If there was one, it's gone now. We cannot truly step-to-next
846 statement through an exec(). */
847 th->control.step_resume_breakpoint = NULL;
848 th->control.exception_resume_breakpoint = NULL;
849 th->control.step_range_start = 0;
850 th->control.step_range_end = 0;
851
852 /* The target reports the exec event to the main thread, even if
853 some other thread does the exec, and even if the main thread was
854 already stopped --- if debugging in non-stop mode, it's possible
855 the user had the main thread held stopped in the previous image
856 --- release it now. This is the same behavior as step-over-exec
857 with scheduler-locking on in all-stop mode. */
858 th->stop_requested = 0;
859
860 /* What is this a.out's name? */
861 printf_unfiltered (_("%s is executing new program: %s\n"),
862 target_pid_to_str (inferior_ptid),
863 execd_pathname);
864
865 /* We've followed the inferior through an exec. Therefore, the
866 inferior has essentially been killed & reborn. */
867
868 gdb_flush (gdb_stdout);
869
870 breakpoint_init_inferior (inf_execd);
871
872 if (gdb_sysroot && *gdb_sysroot)
873 {
874 char *name = alloca (strlen (gdb_sysroot)
875 + strlen (execd_pathname)
876 + 1);
877
878 strcpy (name, gdb_sysroot);
879 strcat (name, execd_pathname);
880 execd_pathname = name;
881 }
882
883 /* Reset the shared library package. This ensures that we get a
884 shlib event when the child reaches "_start", at which point the
885 dld will have had a chance to initialize the child. */
886 /* Also, loading a symbol file below may trigger symbol lookups, and
887 we don't want those to be satisfied by the libraries of the
888 previous incarnation of this process. */
889 no_shared_libraries (NULL, 0);
890
891 if (follow_exec_mode_string == follow_exec_mode_new)
892 {
893 struct program_space *pspace;
894
895 /* The user wants to keep the old inferior and program spaces
896 around. Create a new fresh one, and switch to it. */
897
898 inf = add_inferior (current_inferior ()->pid);
899 pspace = add_program_space (maybe_new_address_space ());
900 inf->pspace = pspace;
901 inf->aspace = pspace->aspace;
902
903 exit_inferior_num_silent (current_inferior ()->num);
904
905 set_current_inferior (inf);
906 set_current_program_space (pspace);
907 }
908 else
909 {
910 /* The old description may no longer be fit for the new image.
911 E.g, a 64-bit process exec'ed a 32-bit process. Clear the
912 old description; we'll read a new one below. No need to do
913 this on "follow-exec-mode new", as the old inferior stays
914 around (its description is later cleared/refetched on
915 restart). */
916 target_clear_description ();
917 }
918
919 gdb_assert (current_program_space == inf->pspace);
920
921 /* That a.out is now the one to use. */
922 exec_file_attach (execd_pathname, 0);
923
924 /* SYMFILE_DEFER_BP_RESET is used so that the proper displacement for a PIE
925 (Position Independent Executable) main symbol file will get applied by
926 solib_create_inferior_hook below. Without it, breakpoint_re_set would fail
927 to insert the breakpoints with the zero displacement. */
928
929 symbol_file_add (execd_pathname,
930 (inf->symfile_flags
931 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
932 NULL, 0);
933
934 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
935 set_initial_language ();
936
937 /* If the target can specify a description, read it. Must do this
938 after flipping to the new executable (because the target supplied
939 description must be compatible with the executable's
940 architecture, and the old executable may e.g., be 32-bit, while
941 the new one 64-bit), and before anything involving memory or
942 registers. */
943 target_find_description ();
944
945 solib_create_inferior_hook (0);
946
947 jit_inferior_created_hook ();
948
949 breakpoint_re_set ();
950
951 /* Reinsert all breakpoints. (Those which were symbolic have
952 been reset to the proper address in the new a.out, thanks
953 to symbol_file_command...). */
954 insert_breakpoints ();
955
956 /* The next resume of this inferior should bring it to the shlib
957 startup breakpoints. (If the user had also set bp's on
958 "main" from the old (parent) process, then they'll auto-
959 matically get reset there in the new process.). */
960 }
961
962 /* Non-zero if we are just simulating a single-step. This is needed
963 because we cannot remove the breakpoints in the inferior process
964 until after the `wait' in `wait_for_inferior'. */
965 static int singlestep_breakpoints_inserted_p = 0;
966
967 /* The thread we inserted single-step breakpoints for. */
968 static ptid_t singlestep_ptid;
969
970 /* PC when we started this single-step. */
971 static CORE_ADDR singlestep_pc;
972
973 /* Info about an instruction that is being stepped over. Invalid if
974 ASPACE is NULL. */
975
976 struct step_over_info
977 {
978 /* The instruction's address space. */
979 struct address_space *aspace;
980
981 /* The instruction's address. */
982 CORE_ADDR address;
983 };
984
985 /* The step-over info of the location that is being stepped over.
986
987 Note that with async/breakpoint always-inserted mode, a user might
988 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
989 being stepped over. As setting a new breakpoint inserts all
990 breakpoints, we need to make sure the breakpoint being stepped over
991 isn't inserted then. We do that by only clearing the step-over
992 info when the step-over is actually finished (or aborted).
993
994 Presently GDB can only step over one breakpoint at any given time.
995 Given that threads that can't run code in the same address space as the
996 breakpoint's can't really miss the breakpoint, GDB could be taught
997 to step-over at most one breakpoint per address space (so this info
998 could move to the address space object if/when GDB is extended).
999 The set of breakpoints being stepped over will normally be much
1000 smaller than the set of all breakpoints, so a flag in the
1001 breakpoint location structure would be wasteful. A separate list
1002 also saves complexity and run-time, as otherwise we'd have to go
1003 through all breakpoint locations clearing their flag whenever we
1004 start a new sequence. Similar considerations weigh against storing
1005 this info in the thread object. Plus, not all step overs actually
1006 have breakpoint locations -- e.g., stepping past a single-step
1007 breakpoint, or stepping to complete a non-continuable
1008 watchpoint. */
1009 static struct step_over_info step_over_info;
1010
1011 /* Record the address of the breakpoint/instruction we're currently
1012 stepping over. */
1013
1014 static void
1015 set_step_over_info (struct address_space *aspace, CORE_ADDR address)
1016 {
1017 step_over_info.aspace = aspace;
1018 step_over_info.address = address;
1019 }
1020
1021 /* Called when we're no longer stepping over a breakpoint / an
1022 instruction, so all breakpoints are free to be (re)inserted. */
1023
1024 static void
1025 clear_step_over_info (void)
1026 {
1027 step_over_info.aspace = NULL;
1028 step_over_info.address = 0;
1029 }
1030
1031 /* See inferior.h. */
1032
1033 int
1034 stepping_past_instruction_at (struct address_space *aspace,
1035 CORE_ADDR address)
1036 {
1037 return (step_over_info.aspace != NULL
1038 && breakpoint_address_match (aspace, address,
1039 step_over_info.aspace,
1040 step_over_info.address));
1041 }
1042
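/* For illustration only: a hedged sketch of the kind of consumer
   stepping_past_instruction_at is written for.  A breakpoint-insertion
   path would skip the location currently being stepped over until the
   step-over finishes.  The `bl' parameter and its `pspace->aspace' and
   `address' fields stand in for GDB's breakpoint location type and are
   assumptions of this sketch, which is compiled out.  */
#if 0
static int
example_should_insert_location (struct bp_location *bl)
{
  if (stepping_past_instruction_at (bl->pspace->aspace, bl->address))
    return 0;	/* Leave it uninserted for now.  */
  return 1;
}
#endif
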
1043 \f
1044 /* Displaced stepping. */
1045
1046 /* In non-stop debugging mode, we must take special care to manage
1047 breakpoints properly; in particular, the traditional strategy for
1048 stepping a thread past a breakpoint it has hit is unsuitable.
1049 'Displaced stepping' is a tactic for stepping one thread past a
1050 breakpoint it has hit while ensuring that other threads running
1051 concurrently will hit the breakpoint as they should.
1052
1053 The traditional way to step a thread T off a breakpoint in a
1054 multi-threaded program in all-stop mode is as follows:
1055
1056 a0) Initially, all threads are stopped, and breakpoints are not
1057 inserted.
1058 a1) We single-step T, leaving breakpoints uninserted.
1059 a2) We insert breakpoints, and resume all threads.
1060
1061 In non-stop debugging, however, this strategy is unsuitable: we
1062 don't want to have to stop all threads in the system in order to
1063 continue or step T past a breakpoint. Instead, we use displaced
1064 stepping:
1065
1066 n0) Initially, T is stopped, other threads are running, and
1067 breakpoints are inserted.
1068 n1) We copy the instruction "under" the breakpoint to a separate
1069 location, outside the main code stream, making any adjustments
1070 to the instruction, register, and memory state as directed by
1071 T's architecture.
1072 n2) We single-step T over the instruction at its new location.
1073 n3) We adjust the resulting register and memory state as directed
1074 by T's architecture. This includes resetting T's PC to point
1075 back into the main instruction stream.
1076 n4) We resume T.
1077
1078 This approach depends on the following gdbarch methods:
1079
1080 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1081 indicate where to copy the instruction, and how much space must
1082 be reserved there. We use these in step n1.
1083
1084 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1085 address, and makes any necessary adjustments to the instruction,
1086 register contents, and memory. We use this in step n1.
1087
1088 - gdbarch_displaced_step_fixup adjusts registers and memory after
1089 we have successfully single-stepped the instruction, to yield the
1090 same effect the instruction would have had if we had executed it
1091 at its original address. We use this in step n3.
1092
1093 - gdbarch_displaced_step_free_closure provides cleanup.
1094
1095 The gdbarch_displaced_step_copy_insn and
1096 gdbarch_displaced_step_fixup functions must be written so that
1097 copying an instruction with gdbarch_displaced_step_copy_insn,
1098 single-stepping across the copied instruction, and then applying
1099 gdbarch_displaced_step_fixup should have the same effects on the
1100 thread's memory and registers as stepping the instruction in place
1101 would have. Exactly which responsibilities fall to the copy and
1102 which fall to the fixup is up to the author of those functions.
1103
1104 See the comments in gdbarch.sh for details.
1105
1106 Note that displaced stepping and software single-step cannot
1107 currently be used in combination, although with some care I think
1108 they could be made to. Software single-step works by placing
1109 breakpoints on all possible subsequent instructions; if the
1110 displaced instruction is a PC-relative jump, those breakpoints
1111 could fall in very strange places --- on pages that aren't
1112 executable, or at addresses that are not proper instruction
1113 boundaries. (We do generally let other threads run while we wait
1114 to hit the software single-step breakpoint, and they might
1115 encounter such a corrupted instruction.) One way to work around
1116 this would be to have gdbarch_displaced_step_copy_insn fully
1117 simulate the effect of PC-relative instructions (and return NULL)
1118 on architectures that use software single-stepping.
1119
1120 In non-stop mode, we can have independent and simultaneous step
1121 requests, so more than one thread may need to simultaneously step
1122 over a breakpoint. The current implementation assumes there is
1123 only one scratch space per process. In this case, we have to
1124 serialize access to the scratch space. If thread A wants to step
1125 over a breakpoint, but we are currently waiting for some other
1126 thread to complete a displaced step, we leave thread A stopped and
1127 place it in the displaced_step_request_queue. Whenever a displaced
1128 step finishes, we pick the next thread in the queue and start a new
1129 displaced step operation on it. See displaced_step_prepare and
1130 displaced_step_fixup for details. */
1131
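/* For illustration only: a hedged sketch mapping steps n1-n4 above onto
   the gdbarch hooks.  The bookkeeping, scratch-area save/restore, and
   request queueing that displaced_step_prepare / displaced_step_fixup
   below actually perform are omitted; this is compiled out and is not
   the real implementation.  */
#if 0
static void
example_displaced_step_one (struct regcache *regcache)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);	 /* n1 */
  struct displaced_step_closure *closure
    = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
					regcache);		 /* n1 */

  regcache_write_pc (regcache, copy);
  target_resume (inferior_ptid, 1, GDB_SIGNAL_0);		 /* n2 */

  /* ... once the target reports the single-step completed ...  */
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy,
				regcache);			 /* n3 */
  gdbarch_displaced_step_free_closure (gdbarch, closure);
  target_resume (inferior_ptid, 0, GDB_SIGNAL_0);		 /* n4 */
}
#endif
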
1132 struct displaced_step_request
1133 {
1134 ptid_t ptid;
1135 struct displaced_step_request *next;
1136 };
1137
1138 /* Per-inferior displaced stepping state. */
1139 struct displaced_step_inferior_state
1140 {
1141 /* Pointer to next in linked list. */
1142 struct displaced_step_inferior_state *next;
1143
1144 /* The process this displaced step state refers to. */
1145 int pid;
1146
1147 /* A queue of pending displaced stepping requests. One entry per
1148 thread that needs to do a displaced step. */
1149 struct displaced_step_request *step_request_queue;
1150
1151 /* If this is not null_ptid, this is the thread carrying out a
1152 displaced single-step in process PID. This thread's state will
1153 require fixing up once it has completed its step. */
1154 ptid_t step_ptid;
1155
1156 /* The architecture the thread had when we stepped it. */
1157 struct gdbarch *step_gdbarch;
1158
1159 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1160 for post-step cleanup. */
1161 struct displaced_step_closure *step_closure;
1162
1163 /* The address of the original instruction, and the copy we
1164 made. */
1165 CORE_ADDR step_original, step_copy;
1166
1167 /* Saved contents of copy area. */
1168 gdb_byte *step_saved_copy;
1169 };
1170
1171 /* The list of states of processes involved in displaced stepping
1172 presently. */
1173 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1174
1175 /* Get the displaced stepping state of process PID. */
1176
1177 static struct displaced_step_inferior_state *
1178 get_displaced_stepping_state (int pid)
1179 {
1180 struct displaced_step_inferior_state *state;
1181
1182 for (state = displaced_step_inferior_states;
1183 state != NULL;
1184 state = state->next)
1185 if (state->pid == pid)
1186 return state;
1187
1188 return NULL;
1189 }
1190
1191 /* Add a new displaced stepping state for process PID to the displaced
1192 stepping state list, or return a pointer to the existing entry
1193 if one already exists. Never returns NULL. */
1194
1195 static struct displaced_step_inferior_state *
1196 add_displaced_stepping_state (int pid)
1197 {
1198 struct displaced_step_inferior_state *state;
1199
1200 for (state = displaced_step_inferior_states;
1201 state != NULL;
1202 state = state->next)
1203 if (state->pid == pid)
1204 return state;
1205
1206 state = xcalloc (1, sizeof (*state));
1207 state->pid = pid;
1208 state->next = displaced_step_inferior_states;
1209 displaced_step_inferior_states = state;
1210
1211 return state;
1212 }
1213
1214 /* If the inferior is in displaced stepping, and ADDR equals the starting
1215 address of the copy area, return the corresponding displaced_step_closure.
1216 Otherwise, return NULL. */
1217
1218 struct displaced_step_closure*
1219 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1220 {
1221 struct displaced_step_inferior_state *displaced
1222 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1223
1224 /* Used when checking the mode of the displaced instruction in the copy area. */
1225 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1226 && (displaced->step_copy == addr))
1227 return displaced->step_closure;
1228
1229 return NULL;
1230 }
1231
1232 /* Remove the displaced stepping state of process PID. */
1233
1234 static void
1235 remove_displaced_stepping_state (int pid)
1236 {
1237 struct displaced_step_inferior_state *it, **prev_next_p;
1238
1239 gdb_assert (pid != 0);
1240
1241 it = displaced_step_inferior_states;
1242 prev_next_p = &displaced_step_inferior_states;
1243 while (it)
1244 {
1245 if (it->pid == pid)
1246 {
1247 *prev_next_p = it->next;
1248 xfree (it);
1249 return;
1250 }
1251
1252 prev_next_p = &it->next;
1253 it = *prev_next_p;
1254 }
1255 }
1256
1257 static void
1258 infrun_inferior_exit (struct inferior *inf)
1259 {
1260 remove_displaced_stepping_state (inf->pid);
1261 }
1262
1263 /* If ON, and the architecture supports it, GDB will use displaced
1264 stepping to step over breakpoints. If OFF, or if the architecture
1265 doesn't support it, GDB will instead use the traditional
1266 hold-and-step approach. If AUTO (which is the default), GDB will
1267 decide which technique to use to step over breakpoints depending on
1268 which of all-stop or non-stop mode is active --- displaced stepping
1269 in non-stop mode; hold-and-step in all-stop mode. */
1270
1271 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1272
1273 static void
1274 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1275 struct cmd_list_element *c,
1276 const char *value)
1277 {
1278 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1279 fprintf_filtered (file,
1280 _("Debugger's willingness to use displaced stepping "
1281 "to step over breakpoints is %s (currently %s).\n"),
1282 value, non_stop ? "on" : "off");
1283 else
1284 fprintf_filtered (file,
1285 _("Debugger's willingness to use displaced stepping "
1286 "to step over breakpoints is %s.\n"), value);
1287 }
1288
1289 /* Return non-zero if displaced stepping can/should be used to step
1290 over breakpoints. */
1291
1292 static int
1293 use_displaced_stepping (struct gdbarch *gdbarch)
1294 {
1295 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1296 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1297 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1298 && find_record_target () == NULL);
1299 }
1300
1301 /* Clean out any stray displaced stepping state. */
1302 static void
1303 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1304 {
1305 /* Indicate that there is no cleanup pending. */
1306 displaced->step_ptid = null_ptid;
1307
1308 if (displaced->step_closure)
1309 {
1310 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1311 displaced->step_closure);
1312 displaced->step_closure = NULL;
1313 }
1314 }
1315
1316 static void
1317 displaced_step_clear_cleanup (void *arg)
1318 {
1319 struct displaced_step_inferior_state *state = arg;
1320
1321 displaced_step_clear (state);
1322 }
1323
1324 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1325 void
1326 displaced_step_dump_bytes (struct ui_file *file,
1327 const gdb_byte *buf,
1328 size_t len)
1329 {
1330 int i;
1331
1332 for (i = 0; i < len; i++)
1333 fprintf_unfiltered (file, "%02x ", buf[i]);
1334 fputs_unfiltered ("\n", file);
1335 }
1336
1337 /* Prepare to single-step, using displaced stepping.
1338
1339 Note that we cannot use displaced stepping when we have a signal to
1340 deliver. If we have a signal to deliver and an instruction to step
1341 over, then after the step, there will be no indication from the
1342 target whether the thread entered a signal handler or ignored the
1343 signal and stepped over the instruction successfully --- both cases
1344 result in a simple SIGTRAP. In the first case we mustn't do a
1345 fixup, and in the second case we must --- but we can't tell which.
1346 Comments in the code for 'random signals' in handle_inferior_event
1347 explain how we handle this case instead.
1348
1349 Returns 1 if preparing was successful -- this thread is going to be
1350 stepped now; or 0 if displaced stepping this thread got queued. */
1351 static int
1352 displaced_step_prepare (ptid_t ptid)
1353 {
1354 struct cleanup *old_cleanups, *ignore_cleanups;
1355 struct thread_info *tp = find_thread_ptid (ptid);
1356 struct regcache *regcache = get_thread_regcache (ptid);
1357 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1358 CORE_ADDR original, copy;
1359 ULONGEST len;
1360 struct displaced_step_closure *closure;
1361 struct displaced_step_inferior_state *displaced;
1362 int status;
1363
1364 /* We should never reach this function if the architecture does not
1365 support displaced stepping. */
1366 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1367
1368 /* Disable range stepping while executing in the scratch pad. We
1369 want a single-step even if executing the displaced instruction in
1370 the scratch buffer lands within the stepping range (e.g., a
1371 jump/branch). */
1372 tp->control.may_range_step = 0;
1373
1374 /* We have to displaced step one thread at a time, as we only have
1375 access to a single scratch space per inferior. */
1376
1377 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1378
1379 if (!ptid_equal (displaced->step_ptid, null_ptid))
1380 {
1381 /* Already waiting for a displaced step to finish. Defer this
1382 request and place in queue. */
1383 struct displaced_step_request *req, *new_req;
1384
1385 if (debug_displaced)
1386 fprintf_unfiltered (gdb_stdlog,
1387 "displaced: defering step of %s\n",
1388 target_pid_to_str (ptid));
1389
1390 new_req = xmalloc (sizeof (*new_req));
1391 new_req->ptid = ptid;
1392 new_req->next = NULL;
1393
1394 if (displaced->step_request_queue)
1395 {
1396 for (req = displaced->step_request_queue;
1397 req && req->next;
1398 req = req->next)
1399 ;
1400 req->next = new_req;
1401 }
1402 else
1403 displaced->step_request_queue = new_req;
1404
1405 return 0;
1406 }
1407 else
1408 {
1409 if (debug_displaced)
1410 fprintf_unfiltered (gdb_stdlog,
1411 "displaced: stepping %s now\n",
1412 target_pid_to_str (ptid));
1413 }
1414
1415 displaced_step_clear (displaced);
1416
1417 old_cleanups = save_inferior_ptid ();
1418 inferior_ptid = ptid;
1419
1420 original = regcache_read_pc (regcache);
1421
1422 copy = gdbarch_displaced_step_location (gdbarch);
1423 len = gdbarch_max_insn_length (gdbarch);
1424
1425 /* Save the original contents of the copy area. */
1426 displaced->step_saved_copy = xmalloc (len);
1427 ignore_cleanups = make_cleanup (free_current_contents,
1428 &displaced->step_saved_copy);
1429 status = target_read_memory (copy, displaced->step_saved_copy, len);
1430 if (status != 0)
1431 throw_error (MEMORY_ERROR,
1432 _("Error accessing memory address %s (%s) for "
1433 "displaced-stepping scratch space."),
1434 paddress (gdbarch, copy), safe_strerror (status));
1435 if (debug_displaced)
1436 {
1437 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1438 paddress (gdbarch, copy));
1439 displaced_step_dump_bytes (gdb_stdlog,
1440 displaced->step_saved_copy,
1441 len);
1442 };
1443
1444 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1445 original, copy, regcache);
1446
1447 /* We don't support the fully-simulated case at present. */
1448 gdb_assert (closure);
1449
1450 /* Save the information we need to fix things up if the step
1451 succeeds. */
1452 displaced->step_ptid = ptid;
1453 displaced->step_gdbarch = gdbarch;
1454 displaced->step_closure = closure;
1455 displaced->step_original = original;
1456 displaced->step_copy = copy;
1457
1458 make_cleanup (displaced_step_clear_cleanup, displaced);
1459
1460 /* Resume execution at the copy. */
1461 regcache_write_pc (regcache, copy);
1462
1463 discard_cleanups (ignore_cleanups);
1464
1465 do_cleanups (old_cleanups);
1466
1467 if (debug_displaced)
1468 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1469 paddress (gdbarch, copy));
1470
1471 return 1;
1472 }
1473
1474 static void
1475 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1476 const gdb_byte *myaddr, int len)
1477 {
1478 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1479
1480 inferior_ptid = ptid;
1481 write_memory (memaddr, myaddr, len);
1482 do_cleanups (ptid_cleanup);
1483 }
1484
1485 /* Restore the contents of the copy area for thread PTID. */
1486
1487 static void
1488 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1489 ptid_t ptid)
1490 {
1491 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1492
1493 write_memory_ptid (ptid, displaced->step_copy,
1494 displaced->step_saved_copy, len);
1495 if (debug_displaced)
1496 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1497 target_pid_to_str (ptid),
1498 paddress (displaced->step_gdbarch,
1499 displaced->step_copy));
1500 }
1501
1502 static void
1503 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1504 {
1505 struct cleanup *old_cleanups;
1506 struct displaced_step_inferior_state *displaced
1507 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1508
1509 /* Was any thread of this process doing a displaced step? */
1510 if (displaced == NULL)
1511 return;
1512
1513 /* Was this event for the pid we displaced? */
1514 if (ptid_equal (displaced->step_ptid, null_ptid)
1515 || ! ptid_equal (displaced->step_ptid, event_ptid))
1516 return;
1517
1518 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1519
1520 displaced_step_restore (displaced, displaced->step_ptid);
1521
1522 /* Did the instruction complete successfully? */
1523 if (signal == GDB_SIGNAL_TRAP)
1524 {
1525 /* Fix up the resulting state. */
1526 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1527 displaced->step_closure,
1528 displaced->step_original,
1529 displaced->step_copy,
1530 get_thread_regcache (displaced->step_ptid));
1531 }
1532 else
1533 {
1534 /* Since the instruction didn't complete, all we can do is
1535 relocate the PC. */
1536 struct regcache *regcache = get_thread_regcache (event_ptid);
1537 CORE_ADDR pc = regcache_read_pc (regcache);
1538
1539 pc = displaced->step_original + (pc - displaced->step_copy);
1540 regcache_write_pc (regcache, pc);
1541 }
1542
1543 do_cleanups (old_cleanups);
1544
1545 displaced->step_ptid = null_ptid;
1546
1547 /* Are there any pending displaced stepping requests? If so, run
1548 one now. Leave the state object around, since we're likely to
1549 need it again soon. */
1550 while (displaced->step_request_queue)
1551 {
1552 struct displaced_step_request *head;
1553 ptid_t ptid;
1554 struct regcache *regcache;
1555 struct gdbarch *gdbarch;
1556 CORE_ADDR actual_pc;
1557 struct address_space *aspace;
1558
1559 head = displaced->step_request_queue;
1560 ptid = head->ptid;
1561 displaced->step_request_queue = head->next;
1562 xfree (head);
1563
1564 context_switch (ptid);
1565
1566 regcache = get_thread_regcache (ptid);
1567 actual_pc = regcache_read_pc (regcache);
1568 aspace = get_regcache_aspace (regcache);
1569
1570 if (breakpoint_here_p (aspace, actual_pc))
1571 {
1572 if (debug_displaced)
1573 fprintf_unfiltered (gdb_stdlog,
1574 "displaced: stepping queued %s now\n",
1575 target_pid_to_str (ptid));
1576
1577 displaced_step_prepare (ptid);
1578
1579 gdbarch = get_regcache_arch (regcache);
1580
1581 if (debug_displaced)
1582 {
1583 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1584 gdb_byte buf[4];
1585
1586 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1587 paddress (gdbarch, actual_pc));
1588 read_memory (actual_pc, buf, sizeof (buf));
1589 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1590 }
1591
1592 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1593 displaced->step_closure))
1594 target_resume (ptid, 1, GDB_SIGNAL_0);
1595 else
1596 target_resume (ptid, 0, GDB_SIGNAL_0);
1597
1598 /* Done, we're stepping a thread. */
1599 break;
1600 }
1601 else
1602 {
1603 int step;
1604 struct thread_info *tp = inferior_thread ();
1605
1606 /* The breakpoint we were sitting under has since been
1607 removed. */
1608 tp->control.trap_expected = 0;
1609
1610 /* Go back to what we were trying to do. */
1611 step = currently_stepping (tp);
1612
1613 if (debug_displaced)
1614 fprintf_unfiltered (gdb_stdlog,
1615 "displaced: breakpoint is gone: %s, step(%d)\n",
1616 target_pid_to_str (tp->ptid), step);
1617
1618 target_resume (ptid, step, GDB_SIGNAL_0);
1619 tp->suspend.stop_signal = GDB_SIGNAL_0;
1620
1621 /* This request was discarded. See if there's any other
1622 thread waiting for its turn. */
1623 }
1624 }
1625 }
1626
1627 /* Update global variables holding ptids to hold NEW_PTID if they were
1628 holding OLD_PTID. */
1629 static void
1630 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1631 {
1632 struct displaced_step_request *it;
1633 struct displaced_step_inferior_state *displaced;
1634
1635 if (ptid_equal (inferior_ptid, old_ptid))
1636 inferior_ptid = new_ptid;
1637
1638 if (ptid_equal (singlestep_ptid, old_ptid))
1639 singlestep_ptid = new_ptid;
1640
1641 for (displaced = displaced_step_inferior_states;
1642 displaced;
1643 displaced = displaced->next)
1644 {
1645 if (ptid_equal (displaced->step_ptid, old_ptid))
1646 displaced->step_ptid = new_ptid;
1647
1648 for (it = displaced->step_request_queue; it; it = it->next)
1649 if (ptid_equal (it->ptid, old_ptid))
1650 it->ptid = new_ptid;
1651 }
1652 }
1653
1654 \f
1655 /* Resuming. */
1656
1657 /* Things to clean up if we QUIT out of resume (). */
1658 static void
1659 resume_cleanups (void *ignore)
1660 {
1661 normal_stop ();
1662 }
1663
1664 static const char schedlock_off[] = "off";
1665 static const char schedlock_on[] = "on";
1666 static const char schedlock_step[] = "step";
1667 static const char *const scheduler_enums[] = {
1668 schedlock_off,
1669 schedlock_on,
1670 schedlock_step,
1671 NULL
1672 };
1673 static const char *scheduler_mode = schedlock_off;
1674 static void
1675 show_scheduler_mode (struct ui_file *file, int from_tty,
1676 struct cmd_list_element *c, const char *value)
1677 {
1678 fprintf_filtered (file,
1679 _("Mode for locking scheduler "
1680 "during execution is \"%s\".\n"),
1681 value);
1682 }
1683
1684 static void
1685 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1686 {
1687 if (!target_can_lock_scheduler)
1688 {
1689 scheduler_mode = schedlock_off;
1690 error (_("Target '%s' cannot support this command."), target_shortname);
1691 }
1692 }
1693
1694 /* True if execution commands resume all threads of all processes by
1695 default; otherwise, resume only threads of the current inferior
1696 process. */
1697 int sched_multi = 0;
1698
1699 /* Try to set up software single stepping over the specified location.
1700 Return 1 if target_resume() should use hardware single step.
1701
1702 GDBARCH the current gdbarch.
1703 PC the location to step over. */
1704
1705 static int
1706 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1707 {
1708 int hw_step = 1;
1709
1710 if (execution_direction == EXEC_FORWARD
1711 && gdbarch_software_single_step_p (gdbarch)
1712 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1713 {
1714 hw_step = 0;
1715 /* Do not pull these breakpoints until after a `wait' in
1716 `wait_for_inferior'. */
1717 singlestep_breakpoints_inserted_p = 1;
1718 singlestep_ptid = inferior_ptid;
1719 singlestep_pc = pc;
1720 }
1721 return hw_step;
1722 }
1723
1724 /* Return a ptid representing the set of threads that we will proceed,
1725 from the perspective of the user/frontend. We may actually resume
1726 fewer threads at first, e.g., if a thread is stopped at a
1727 breakpoint that needs stepping-off, but that should not be visible
1728 to the user/frontend, and neither should the frontend/user be
1729 allowed to proceed any of the threads that happen to be stopped for
1730 internal run control handling, if a previous command wanted them
1731 resumed. */
1732
1733 ptid_t
1734 user_visible_resume_ptid (int step)
1735 {
1736 /* By default, resume all threads of all processes. */
1737 ptid_t resume_ptid = RESUME_ALL;
1738
1739 /* Maybe resume only all threads of the current process. */
1740 if (!sched_multi && target_supports_multi_process ())
1741 {
1742 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1743 }
1744
1745 /* Maybe resume a single thread after all. */
1746 if (non_stop)
1747 {
1748 /* With non-stop mode on, threads are always handled
1749 individually. */
1750 resume_ptid = inferior_ptid;
1751 }
1752 else if ((scheduler_mode == schedlock_on)
1753 || (scheduler_mode == schedlock_step
1754 && (step || singlestep_breakpoints_inserted_p)))
1755 {
1756 /* User-settable 'scheduler' mode requires solo thread resume. */
1757 resume_ptid = inferior_ptid;
1758 }
1759
1760 return resume_ptid;
1761 }
1762
1763 /* Resume the inferior, but allow a QUIT. This is useful if the user
1764 wants to interrupt some lengthy single-stepping operation
1765 (for child processes, the SIGINT goes to the inferior, and so
1766 we get a SIGINT random_signal, but for remote debugging and perhaps
1767 other targets, that's not true).
1768
1769 STEP nonzero if we should step (zero to continue instead).
1770 SIG is the signal to give the inferior (zero for none). */
1771 void
1772 resume (int step, enum gdb_signal sig)
1773 {
1774 int should_resume = 1;
1775 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1776 struct regcache *regcache = get_current_regcache ();
1777 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1778 struct thread_info *tp = inferior_thread ();
1779 CORE_ADDR pc = regcache_read_pc (regcache);
1780 struct address_space *aspace = get_regcache_aspace (regcache);
1781
1782 QUIT;
1783
1784 if (current_inferior ()->waiting_for_vfork_done)
1785 {
1786 /* Don't try to single-step a vfork parent that is waiting for
1787 the child to get out of the shared memory region (by exec'ing
1788 or exiting). This is particularly important on software
1789 single-step archs, as the child process would trip on the
1790 software single step breakpoint inserted for the parent
1791 process. Since the parent will not actually execute any
1792 instruction until the child is out of the shared region (such
1793 are vfork's semantics), it is safe to simply continue it.
1794 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1795 the parent, and tell it to `keep_going', which automatically
1796 re-sets it stepping. */
1797 if (debug_infrun)
1798 fprintf_unfiltered (gdb_stdlog,
1799 "infrun: resume : clear step\n");
1800 step = 0;
1801 }
1802
1803 if (debug_infrun)
1804 fprintf_unfiltered (gdb_stdlog,
1805 "infrun: resume (step=%d, signal=%s), "
1806 "trap_expected=%d, current thread [%s] at %s\n",
1807 step, gdb_signal_to_symbol_string (sig),
1808 tp->control.trap_expected,
1809 target_pid_to_str (inferior_ptid),
1810 paddress (gdbarch, pc));
1811
1812 /* Normally, by the time we reach `resume', the breakpoints are either
1813 removed or inserted, as appropriate. The exception is if we're sitting
1814 at a permanent breakpoint; we need to step over it, but permanent
1815 breakpoints can't be removed. So we have to test for it here. */
1816 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1817 {
1818 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1819 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1820 else
1821 error (_("\
1822 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1823 how to step past a permanent breakpoint on this architecture. Try using\n\
1824 a command like `return' or `jump' to continue execution."));
1825 }
1826
1827 /* If we have a breakpoint to step over, make sure to do a single
1828 step only. Same if we have software watchpoints. */
1829 if (tp->control.trap_expected || bpstat_should_step ())
1830 tp->control.may_range_step = 0;
1831
1832 /* If enabled, step over breakpoints by executing a copy of the
1833 instruction at a different address.
1834
1835 We can't use displaced stepping when we have a signal to deliver;
1836 the comments for displaced_step_prepare explain why. The
1837 comments in the handle_inferior event for dealing with 'random
1838 signals' explain what we do instead.
1839
1840 We can't use displaced stepping when we are waiting for a vfork_done
1841 event; displaced stepping would break the vfork child similarly to how
1842 a single-step software breakpoint would. */
1843 if (use_displaced_stepping (gdbarch)
1844 && (tp->control.trap_expected
1845 || (step && gdbarch_software_single_step_p (gdbarch)))
1846 && sig == GDB_SIGNAL_0
1847 && !current_inferior ()->waiting_for_vfork_done)
1848 {
1849 struct displaced_step_inferior_state *displaced;
1850
1851 if (!displaced_step_prepare (inferior_ptid))
1852 {
1853 /* Got placed in displaced stepping queue. Will be resumed
1854 later when all the currently queued displaced stepping
1855 requests finish. The thread is not executing at this point,
1856 and the call to set_executing will be made later. But we
1857 need to call set_running here, since from the frontend's point of view,
1858 the thread is running. */
1859 set_running (inferior_ptid, 1);
1860 discard_cleanups (old_cleanups);
1861 return;
1862 }
1863
1864 /* Update pc to reflect the new address from which we will execute
1865 instructions due to displaced stepping. */
1866 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
1867
1868 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1869 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1870 displaced->step_closure);
1871 }
1872
1873 /* Do we need to do it the hard way, w/temp breakpoints? */
1874 else if (step)
1875 step = maybe_software_singlestep (gdbarch, pc);
1876
1877 /* Currently, our software single-step implementation leads to different
1878 results than hardware single-stepping in one situation: when stepping
1879 into delivering a signal which has an associated signal handler,
1880 hardware single-step will stop at the first instruction of the handler,
1881 while software single-step will simply skip execution of the handler.
1882
1883 For now, this difference in behavior is accepted since there is no
1884 easy way to actually implement single-stepping into a signal handler
1885 without kernel support.
1886
1887 However, there is one scenario where this difference leads to follow-on
1888 problems: if we're stepping off a breakpoint by removing all breakpoints
1889 and then single-stepping. In this case, the software single-step
1890 behavior means that even if there is a *breakpoint* in the signal
1891 handler, GDB still would not stop.
1892
1893 Fortunately, we can at least fix this particular issue. We detect
1894 here the case where we are about to deliver a signal while software
1895 single-stepping with breakpoints removed. In this situation, we
1896 revert the decisions to remove all breakpoints and insert single-
1897 step breakpoints, and instead we install a step-resume breakpoint
1898 at the current address, deliver the signal without stepping, and
1899 once we arrive back at the step-resume breakpoint, actually step
1900 over the breakpoint we originally wanted to step over. */
1901 if (singlestep_breakpoints_inserted_p
1902 && tp->control.trap_expected && sig != GDB_SIGNAL_0)
1903 {
1904 /* If we have nested signals or a pending signal is delivered
1905 immediately after a handler returns, we might already have
1906 a step-resume breakpoint set on the earlier handler. We cannot
1907 set another step-resume breakpoint; just continue on until the
1908 original breakpoint is hit. */
1909 if (tp->control.step_resume_breakpoint == NULL)
1910 {
1911 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1912 tp->step_after_step_resume_breakpoint = 1;
1913 }
1914
1915 remove_single_step_breakpoints ();
1916 singlestep_breakpoints_inserted_p = 0;
1917
1918 clear_step_over_info ();
1919 tp->control.trap_expected = 0;
1920
1921 insert_breakpoints ();
1922 }
1923
1924 if (should_resume)
1925 {
1926 ptid_t resume_ptid;
1927
1928 /* If STEP is set, it's a request to use hardware stepping
1929 facilities. But in that case, we should never
1930 use a singlestep breakpoint.
1931 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1932
1933 /* Decide the set of threads to ask the target to resume. Start
1934 by assuming everything will be resumed, then narrow the set
1935 by applying increasingly restrictive conditions. */
1936 resume_ptid = user_visible_resume_ptid (step);
1937
1938 /* Maybe resume a single thread after all. */
1939 if ((step || singlestep_breakpoints_inserted_p)
1940 && tp->control.trap_expected)
1941 {
1942 /* We're allowing a thread to run past a breakpoint it has
1943 hit, by single-stepping the thread with the breakpoint
1944 removed. In which case, we need to single-step only this
1945 thread, and keep others stopped, as they can miss this
1946 breakpoint if allowed to run. */
1947 resume_ptid = inferior_ptid;
1948 }
1949
1950 if (gdbarch_cannot_step_breakpoint (gdbarch))
1951 {
1952 /* Most targets can step a breakpoint instruction, thus
1953 executing it normally. But if this one cannot, just
1954 continue and we will hit it anyway. */
1955 if (step && breakpoint_inserted_here_p (aspace, pc))
1956 step = 0;
1957 }
1958
1959 if (debug_displaced
1960 && use_displaced_stepping (gdbarch)
1961 && tp->control.trap_expected)
1962 {
1963 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1964 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1965 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1966 gdb_byte buf[4];
1967
1968 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1969 paddress (resume_gdbarch, actual_pc));
1970 read_memory (actual_pc, buf, sizeof (buf));
1971 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1972 }
1973
1974 if (tp->control.may_range_step)
1975 {
1976 /* If we're resuming a thread with the PC out of the step
1977 range, then we're doing some nested/finer run control
1978 operation, like stepping the thread out of the dynamic
1979 linker or the displaced stepping scratch pad. We
1980 shouldn't have allowed a range step then. */
1981 gdb_assert (pc_in_thread_step_range (pc, tp));
1982 }
1983
1984 /* Install inferior's terminal modes. */
1985 target_terminal_inferior ();
1986
1987 /* Avoid confusing the next resume, if the next stop/resume
1988 happens to apply to another thread. */
1989 tp->suspend.stop_signal = GDB_SIGNAL_0;
1990
1991 /* Advise target which signals may be handled silently. If we have
1992 removed breakpoints because we are stepping over one (which can
1993 happen only if we are not using displaced stepping), we need to
1994 receive all signals to avoid accidentally skipping a breakpoint
1995 during execution of a signal handler. */
1996 if ((step || singlestep_breakpoints_inserted_p)
1997 && tp->control.trap_expected
1998 && !use_displaced_stepping (gdbarch))
1999 target_pass_signals (0, NULL);
2000 else
2001 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2002
2003 target_resume (resume_ptid, step, sig);
2004 }
2005
2006 discard_cleanups (old_cleanups);
2007 }
2008 \f
2009 /* Proceeding. */
2010
2011 /* Clear out all variables saying what to do when inferior is continued.
2012 First do this, then set the ones you want, then call `proceed'. */
2013
2014 static void
2015 clear_proceed_status_thread (struct thread_info *tp)
2016 {
2017 if (debug_infrun)
2018 fprintf_unfiltered (gdb_stdlog,
2019 "infrun: clear_proceed_status_thread (%s)\n",
2020 target_pid_to_str (tp->ptid));
2021
2022 tp->control.trap_expected = 0;
2023 tp->control.step_range_start = 0;
2024 tp->control.step_range_end = 0;
2025 tp->control.may_range_step = 0;
2026 tp->control.step_frame_id = null_frame_id;
2027 tp->control.step_stack_frame_id = null_frame_id;
2028 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2029 tp->stop_requested = 0;
2030
2031 tp->control.stop_step = 0;
2032
2033 tp->control.proceed_to_finish = 0;
2034
2035 /* Discard any remaining commands or status from previous stop. */
2036 bpstat_clear (&tp->control.stop_bpstat);
2037 }
2038
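/* Callback for iterate_over_threads. Clears the proceed status of TP,
skipping threads that have already exited. */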
2039 static int
2040 clear_proceed_status_callback (struct thread_info *tp, void *data)
2041 {
2042 if (is_exited (tp->ptid))
2043 return 0;
2044
2045 clear_proceed_status_thread (tp);
2046 return 0;
2047 }
2048
2049 void
2050 clear_proceed_status (void)
2051 {
2052 if (!non_stop)
2053 {
2054 /* In all-stop mode, delete the per-thread status of all
2055 threads; even if inferior_ptid is null_ptid, there may be
2056 threads on the list. E.g., we may be launching a new
2057 process, while selecting the executable. */
2058 iterate_over_threads (clear_proceed_status_callback, NULL);
2059 }
2060
2061 if (!ptid_equal (inferior_ptid, null_ptid))
2062 {
2063 struct inferior *inferior;
2064
2065 if (non_stop)
2066 {
2067 /* If in non-stop mode, only delete the per-thread status of
2068 the current thread. */
2069 clear_proceed_status_thread (inferior_thread ());
2070 }
2071
2072 inferior = current_inferior ();
2073 inferior->control.stop_soon = NO_STOP_QUIETLY;
2074 }
2075
2076 stop_after_trap = 0;
2077
2078 clear_step_over_info ();
2079
2080 observer_notify_about_to_proceed ();
2081
2082 if (stop_registers)
2083 {
2084 regcache_xfree (stop_registers);
2085 stop_registers = NULL;
2086 }
2087 }
2088
2089 /* Returns true if TP is still stopped at a breakpoint that needs
2090 stepping-over in order to make progress. If the breakpoint is gone
2091 meanwhile, we can skip the whole step-over dance. */
2092
2093 static int
2094 thread_still_needs_step_over (struct thread_info *tp)
2095 {
2096 if (tp->stepping_over_breakpoint)
2097 {
2098 struct regcache *regcache = get_thread_regcache (tp->ptid);
2099
2100 if (breakpoint_here_p (get_regcache_aspace (regcache),
2101 regcache_read_pc (regcache)))
2102 return 1;
2103
2104 tp->stepping_over_breakpoint = 0;
2105 }
2106
2107 return 0;
2108 }
2109
2110 /* Look for a thread other than EXCEPT that has previously reported a
2111 breakpoint event, and thus needs a step-over in order to make
2112 progress. Returns NULL if none is found. STEP indicates whether
2113 we're about to step the current thread, in order to decide whether
2114 "set scheduler-locking step" applies. */
2115
2116 static struct thread_info *
2117 find_thread_needs_step_over (int step, struct thread_info *except)
2118 {
2119 int schedlock_enabled;
2120 struct thread_info *tp, *current;
2121
2122 /* With non-stop mode on, threads are always handled individually. */
2123 gdb_assert (! non_stop);
2124
2125 schedlock_enabled = (scheduler_mode == schedlock_on
2126 || (scheduler_mode == schedlock_step
2127 && step));
2128
2129 current = inferior_thread ();
2130
2131 /* If scheduler locking applies, we can avoid iterating over all
2132 threads. */
2133 if (schedlock_enabled)
2134 {
2135 if (except != current
2136 && thread_still_needs_step_over (current))
2137 return current;
2138
2139 return NULL;
2140 }
2141
2142 ALL_THREADS (tp)
2143 {
2144 /* Ignore the EXCEPT thread. */
2145 if (tp == except)
2146 continue;
2147 /* Ignore threads of processes we're not resuming. */
2148 if (!sched_multi
2149 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2150 continue;
2151
2152 if (thread_still_needs_step_over (tp))
2153 return tp;
2154 }
2155
2156 return NULL;
2157 }
2158
2159 /* Basic routine for continuing the program in various fashions.
2160
2161 ADDR is the address to resume at, or -1 for resume where stopped.
2162 SIGGNAL is the signal to give it, or 0 for none,
2163 or -1 to act according to how it stopped.
2164 STEP is nonzero if we should trap after one instruction.
2165 -1 means return after that and print nothing.
2166 You should probably set various step_... variables
2167 before calling here, if you are stepping.
2168
2169 You should call clear_proceed_status before calling proceed. */
2170
2171 void
2172 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2173 {
2174 struct regcache *regcache;
2175 struct gdbarch *gdbarch;
2176 struct thread_info *tp;
2177 CORE_ADDR pc;
2178 struct address_space *aspace;
2179
2180 /* If we're stopped at a fork/vfork, follow the branch set by the
2181 "set follow-fork-mode" command; otherwise, we'll just proceed
2182 resuming the current thread. */
2183 if (!follow_fork ())
2184 {
2185 /* The target for some reason decided not to resume. */
2186 normal_stop ();
2187 if (target_can_async_p ())
2188 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2189 return;
2190 }
2191
2192 /* We'll update this if & when we switch to a new thread. */
2193 previous_inferior_ptid = inferior_ptid;
2194
2195 regcache = get_current_regcache ();
2196 gdbarch = get_regcache_arch (regcache);
2197 aspace = get_regcache_aspace (regcache);
2198 pc = regcache_read_pc (regcache);
2199 tp = inferior_thread ();
2200
2201 if (step > 0)
2202 step_start_function = find_pc_function (pc);
2203 if (step < 0)
2204 stop_after_trap = 1;
2205
2206 /* Fill in with reasonable starting values. */
2207 init_thread_stepping_state (tp);
2208
2209 if (addr == (CORE_ADDR) -1)
2210 {
2211 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2212 && execution_direction != EXEC_REVERSE)
2213 /* There is a breakpoint at the address we will resume at,
2214 step one instruction before inserting breakpoints so that
2215 we do not stop right away (and report a second hit at this
2216 breakpoint).
2217
2218 Note, we don't do this in reverse, because we won't
2219 actually be executing the breakpoint insn anyway.
2220 We'll be (un-)executing the previous instruction. */
2221 tp->stepping_over_breakpoint = 1;
2222 else if (gdbarch_single_step_through_delay_p (gdbarch)
2223 && gdbarch_single_step_through_delay (gdbarch,
2224 get_current_frame ()))
2225 /* We stepped onto an instruction that needs to be stepped
2226 again before re-inserting the breakpoint, do so. */
2227 tp->stepping_over_breakpoint = 1;
2228 }
2229 else
2230 {
2231 regcache_write_pc (regcache, addr);
2232 }
2233
2234 if (debug_infrun)
2235 fprintf_unfiltered (gdb_stdlog,
2236 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2237 paddress (gdbarch, addr),
2238 gdb_signal_to_symbol_string (siggnal), step);
2239
2240 if (non_stop)
2241 /* In non-stop, each thread is handled individually. The context
2242 must already be set to the right thread here. */
2243 ;
2244 else
2245 {
2246 struct thread_info *step_over;
2247
2248 /* In a multi-threaded task we may select another thread and
2249 then continue or step.
2250
2251 But if the old thread was stopped at a breakpoint, it will
2252 immediately cause another breakpoint stop without any
2253 execution (i.e. it will report a breakpoint hit incorrectly).
2254 So we must step over it first.
2255
2256 Look for a thread other than the current (TP) that reported a
2257 breakpoint hit and hasn't been resumed yet since. */
2258 step_over = find_thread_needs_step_over (step, tp);
2259 if (step_over != NULL)
2260 {
2261 if (debug_infrun)
2262 fprintf_unfiltered (gdb_stdlog,
2263 "infrun: need to step-over [%s] first\n",
2264 target_pid_to_str (step_over->ptid));
2265
2266 /* Store the prev_pc for the stepping thread too, needed by
2267 switch_back_to_stepped_thread. */
2268 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2269 switch_to_thread (step_over->ptid);
2270 tp = step_over;
2271 }
2272 }
2273
2274 /* If we need to step over a breakpoint, and we're not using
2275 displaced stepping to do so, insert all breakpoints (watchpoints,
2276 etc.) but the one we're stepping over, step one instruction, and
2277 then re-insert the breakpoint when that step is finished. */
2278 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2279 {
2280 struct regcache *regcache = get_current_regcache ();
2281
2282 set_step_over_info (get_regcache_aspace (regcache),
2283 regcache_read_pc (regcache));
2284 }
2285 else
2286 clear_step_over_info ();
2287
2288 insert_breakpoints ();
2289
2290 tp->control.trap_expected = tp->stepping_over_breakpoint;
2291
2292 if (!non_stop)
2293 {
2294 /* Pass the last stop signal to the thread we're resuming,
2295 irrespective of whether the current thread is the thread that
2296 got the last event or not. This was historically GDB's
2297 behaviour before keeping a stop_signal per thread. */
2298
2299 struct thread_info *last_thread;
2300 ptid_t last_ptid;
2301 struct target_waitstatus last_status;
2302
2303 get_last_target_status (&last_ptid, &last_status);
2304 if (!ptid_equal (inferior_ptid, last_ptid)
2305 && !ptid_equal (last_ptid, null_ptid)
2306 && !ptid_equal (last_ptid, minus_one_ptid))
2307 {
2308 last_thread = find_thread_ptid (last_ptid);
2309 if (last_thread)
2310 {
2311 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2312 last_thread->suspend.stop_signal = GDB_SIGNAL_0;
2313 }
2314 }
2315 }
2316
2317 if (siggnal != GDB_SIGNAL_DEFAULT)
2318 tp->suspend.stop_signal = siggnal;
2319 /* If this signal should not be seen by program,
2320 give it zero. Used for debugging signals. */
2321 else if (!signal_program[tp->suspend.stop_signal])
2322 tp->suspend.stop_signal = GDB_SIGNAL_0;
2323
2324 annotate_starting ();
2325
2326 /* Make sure that output from GDB appears before output from the
2327 inferior. */
2328 gdb_flush (gdb_stdout);
2329
2330 /* Refresh prev_pc value just prior to resuming. This used to be
2331 done in stop_stepping, however, setting prev_pc there did not handle
2332 scenarios such as inferior function calls or returning from
2333 a function via the return command. In those cases, the prev_pc
2334 value was not set properly for subsequent commands. The prev_pc value
2335 is used to initialize the starting line number in the ecs. With an
2336 invalid value, the gdb next command ends up stopping at the position
2337 represented by the next line table entry past our start position.
2338 On platforms that generate one line table entry per line, this
2339 is not a problem. However, on the ia64, the compiler generates
2340 extraneous line table entries that do not increase the line number.
2341 When we issue the gdb next command on the ia64 after an inferior call
2342 or a return command, we often end up a few instructions forward, still
2343 within the original line we started.
2344
2345 An attempt was made to refresh the prev_pc at the same time the
2346 execution_control_state is initialized (for instance, just before
2347 waiting for an inferior event). But this approach did not work
2348 because of platforms that use ptrace, where the pc register cannot
2349 be read unless the inferior is stopped. At that point, we are not
2350 guaranteed the inferior is stopped and so the regcache_read_pc() call
2351 can fail. Setting the prev_pc value here ensures the value is updated
2352 correctly when the inferior is stopped. */
2353 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2354
2355 /* Reset to normal state. */
2356 init_infwait_state ();
2357
2358 /* Resume inferior. */
2359 resume (tp->control.trap_expected || step || bpstat_should_step (),
2360 tp->suspend.stop_signal);
2361
2362 /* Wait for it to stop (if not standalone)
2363 and in any case decode why it stopped, and act accordingly. */
2364 /* Do this only if we are not using the event loop, or if the target
2365 does not support asynchronous execution. */
2366 if (!target_can_async_p ())
2367 {
2368 wait_for_inferior ();
2369 normal_stop ();
2370 }
2371 }
2372 \f
2373
2374 /* Start remote-debugging of a machine over a serial link. */
2375
2376 void
2377 start_remote (int from_tty)
2378 {
2379 struct inferior *inferior;
2380
2381 inferior = current_inferior ();
2382 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2383
2384 /* Always go on waiting for the target, regardless of the mode. */
2385 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2386 indicate to wait_for_inferior that a target should timeout if
2387 nothing is returned (instead of just blocking). Because of this,
2388 targets expecting an immediate response need to, internally, set
2389 things up so that the target_wait() is forced to eventually
2390 timeout. */
2391 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2392 differentiate to its caller what the state of the target is after
2393 the initial open has been performed. Here we're assuming that
2394 the target has stopped. It should be possible to eventually have
2395 target_open() return to the caller an indication that the target
2396 is currently running and GDB state should be set to the same as
2397 for an async run. */
2398 wait_for_inferior ();
2399
2400 /* Now that the inferior has stopped, do any bookkeeping like
2401 loading shared libraries. We want to do this before normal_stop,
2402 so that the displayed frame is up to date. */
2403 post_create_inferior (&current_target, from_tty);
2404
2405 normal_stop ();
2406 }
2407
2408 /* Initialize static vars when a new inferior begins. */
2409
2410 void
2411 init_wait_for_inferior (void)
2412 {
2413 /* These are meaningless until the first time through wait_for_inferior. */
2414
2415 breakpoint_init_inferior (inf_starting);
2416
2417 clear_proceed_status ();
2418
2419 target_last_wait_ptid = minus_one_ptid;
2420
2421 previous_inferior_ptid = inferior_ptid;
2422 init_infwait_state ();
2423
2424 /* Discard any skipped inlined frames. */
2425 clear_inline_frame_state (minus_one_ptid);
2426
2427 singlestep_ptid = null_ptid;
2428 singlestep_pc = 0;
2429 }
2430
2431 \f
2432 /* This enum encodes possible reasons for doing a target_wait, so that
2433 wfi can call target_wait in one place. (Ultimately the call will be
2434 moved out of the infinite loop entirely.) */
2435
2436 enum infwait_states
2437 {
2438 infwait_normal_state,
2439 infwait_step_watch_state,
2440 infwait_nonstep_watch_state
2441 };
2442
2443 /* The PTID we'll do a target_wait on.  */
2444 ptid_t waiton_ptid;
2445
2446 /* Current inferior wait state. */
2447 static enum infwait_states infwait_state;
2448
2449 /* Data to be passed around while handling an event. This data is
2450 discarded between events. */
2451 struct execution_control_state
2452 {
2453 ptid_t ptid;
2454 /* The thread that got the event, if this was a thread event; NULL
2455 otherwise. */
2456 struct thread_info *event_thread;
2457
2458 struct target_waitstatus ws;
2459 int stop_func_filled_in;
2460 CORE_ADDR stop_func_start;
2461 CORE_ADDR stop_func_end;
2462 const char *stop_func_name;
2463 int wait_some_more;
2464
2465 /* We were in infwait_step_watch_state or
2466 infwait_nonstep_watch_state state, and the thread reported an
2467 event. */
2468 int stepped_after_stopped_by_watchpoint;
2469
2470 /* True if the event thread hit the single-step breakpoint of
2471 another thread. Thus the event doesn't cause a stop, the thread
2472 needs to be single-stepped past the single-step breakpoint before
2473 we can switch back to the original stepping thread. */
2474 int hit_singlestep_breakpoint;
2475 };
2476
2477 static void handle_inferior_event (struct execution_control_state *ecs);
2478
2479 static void handle_step_into_function (struct gdbarch *gdbarch,
2480 struct execution_control_state *ecs);
2481 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2482 struct execution_control_state *ecs);
2483 static void handle_signal_stop (struct execution_control_state *ecs);
2484 static void check_exception_resume (struct execution_control_state *,
2485 struct frame_info *);
2486
2487 static void stop_stepping (struct execution_control_state *ecs);
2488 static void prepare_to_wait (struct execution_control_state *ecs);
2489 static void keep_going (struct execution_control_state *ecs);
2490 static void process_event_stop_test (struct execution_control_state *ecs);
2491 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2492
2493 /* Callback for iterate_over_threads. If the thread is stopped, but
2494 the user/frontend doesn't know about that yet, go through
2495 normal_stop, as if the thread had just stopped now. ARG points at
2496 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2497 ptid_is_pid(PTID) is true, applies to all threads of the process
2498 pointed at by PTID. Otherwise, apply only to the thread pointed at by
2499 PTID. */
2500
2501 static int
2502 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2503 {
2504 ptid_t ptid = * (ptid_t *) arg;
2505
2506 if ((ptid_equal (info->ptid, ptid)
2507 || ptid_equal (minus_one_ptid, ptid)
2508 || (ptid_is_pid (ptid)
2509 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2510 && is_running (info->ptid)
2511 && !is_executing (info->ptid))
2512 {
2513 struct cleanup *old_chain;
2514 struct execution_control_state ecss;
2515 struct execution_control_state *ecs = &ecss;
2516
2517 memset (ecs, 0, sizeof (*ecs));
2518
2519 old_chain = make_cleanup_restore_current_thread ();
2520
2521 overlay_cache_invalid = 1;
2522 /* Flush target cache before starting to handle each event.
2523 Target was running and cache could be stale. This is just a
2524 heuristic. Running threads may modify target memory, but we
2525 don't get any event. */
2526 target_dcache_invalidate ();
2527
2528 /* Go through handle_inferior_event/normal_stop, so we always
2529 have consistent output as if the stop event had been
2530 reported. */
2531 ecs->ptid = info->ptid;
2532 ecs->event_thread = find_thread_ptid (info->ptid);
2533 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2534 ecs->ws.value.sig = GDB_SIGNAL_0;
2535
2536 handle_inferior_event (ecs);
2537
2538 if (!ecs->wait_some_more)
2539 {
2540 struct thread_info *tp;
2541
2542 normal_stop ();
2543
2544 /* Finish off the continuations. */
2545 tp = inferior_thread ();
2546 do_all_intermediate_continuations_thread (tp, 1);
2547 do_all_continuations_thread (tp, 1);
2548 }
2549
2550 do_cleanups (old_chain);
2551 }
2552
2553 return 0;
2554 }
2555
2556 /* This function is attached as a "thread_stop_requested" observer.
2557 Cleanup local state that assumed the PTID was to be resumed, and
2558 report the stop to the frontend. */
2559
2560 static void
2561 infrun_thread_stop_requested (ptid_t ptid)
2562 {
2563 struct displaced_step_inferior_state *displaced;
2564
2565 /* PTID was requested to stop. Remove it from the displaced
2566 stepping queue, so we don't try to resume it automatically. */
2567
2568 for (displaced = displaced_step_inferior_states;
2569 displaced;
2570 displaced = displaced->next)
2571 {
2572 struct displaced_step_request *it, **prev_next_p;
2573
2574 it = displaced->step_request_queue;
2575 prev_next_p = &displaced->step_request_queue;
2576 while (it)
2577 {
2578 if (ptid_match (it->ptid, ptid))
2579 {
2580 *prev_next_p = it->next;
2581 it->next = NULL;
2582 xfree (it);
2583 }
2584 else
2585 {
2586 prev_next_p = &it->next;
2587 }
2588
2589 it = *prev_next_p;
2590 }
2591 }
2592
2593 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2594 }
2595
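/* Called when a thread exits: if it was the thread we last waited on,
forget the cached wait ptid. */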
2596 static void
2597 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2598 {
2599 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2600 nullify_last_target_wait_ptid ();
2601 }
2602
2603 /* Callback for iterate_over_threads. */
2604
2605 static int
2606 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2607 {
2608 if (is_exited (info->ptid))
2609 return 0;
2610
2611 delete_step_resume_breakpoint (info);
2612 delete_exception_resume_breakpoint (info);
2613 return 0;
2614 }
2615
2616 /* In all-stop, delete the step resume breakpoint of any thread that
2617 had one. In non-stop, delete the step resume breakpoint of the
2618 thread that just stopped. */
2619
2620 static void
2621 delete_step_thread_step_resume_breakpoint (void)
2622 {
2623 if (!target_has_execution
2624 || ptid_equal (inferior_ptid, null_ptid))
2625 /* If the inferior has exited, we have already deleted the step
2626 resume breakpoints out of GDB's lists. */
2627 return;
2628
2629 if (non_stop)
2630 {
2631 /* If in non-stop mode, only delete the step-resume or
2632 longjmp-resume breakpoint of the thread that just stopped
2633 stepping. */
2634 struct thread_info *tp = inferior_thread ();
2635
2636 delete_step_resume_breakpoint (tp);
2637 delete_exception_resume_breakpoint (tp);
2638 }
2639 else
2640 /* In all-stop mode, delete all step-resume and longjmp-resume
2641 breakpoints of any thread that had them. */
2642 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2643 }
2644
2645 /* A cleanup wrapper. */
2646
2647 static void
2648 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2649 {
2650 delete_step_thread_step_resume_breakpoint ();
2651 }
2652
2653 /* Pretty print the results of target_wait, for debugging purposes. */
2654
2655 static void
2656 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2657 const struct target_waitstatus *ws)
2658 {
2659 char *status_string = target_waitstatus_to_string (ws);
2660 struct ui_file *tmp_stream = mem_fileopen ();
2661 char *text;
2662
2663 /* The text is split over several lines because it was getting too long.
2664 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2665 output as a unit; we want only one timestamp printed if debug_timestamp
2666 is set. */
2667
2668 fprintf_unfiltered (tmp_stream,
2669 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
2670 if (ptid_get_pid (waiton_ptid) != -1)
2671 fprintf_unfiltered (tmp_stream,
2672 " [%s]", target_pid_to_str (waiton_ptid));
2673 fprintf_unfiltered (tmp_stream, ", status) =\n");
2674 fprintf_unfiltered (tmp_stream,
2675 "infrun: %d [%s],\n",
2676 ptid_get_pid (result_ptid),
2677 target_pid_to_str (result_ptid));
2678 fprintf_unfiltered (tmp_stream,
2679 "infrun: %s\n",
2680 status_string);
2681
2682 text = ui_file_xstrdup (tmp_stream, NULL);
2683
2684 /* This uses %s in part to handle %'s in the text, but also to avoid
2685 a gcc error: the format attribute requires a string literal. */
2686 fprintf_unfiltered (gdb_stdlog, "%s", text);
2687
2688 xfree (status_string);
2689 xfree (text);
2690 ui_file_delete (tmp_stream);
2691 }
2692
2693 /* Prepare and stabilize the inferior for detaching it. E.g.,
2694 detaching while a thread is displaced stepping is a recipe for
2695 crashing it, as nothing would readjust the PC out of the scratch
2696 pad. */
2697
2698 void
2699 prepare_for_detach (void)
2700 {
2701 struct inferior *inf = current_inferior ();
2702 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2703 struct cleanup *old_chain_1;
2704 struct displaced_step_inferior_state *displaced;
2705
2706 displaced = get_displaced_stepping_state (inf->pid);
2707
2708 /* Is any thread of this process displaced stepping? If not,
2709 there's nothing else to do. */
2710 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2711 return;
2712
2713 if (debug_infrun)
2714 fprintf_unfiltered (gdb_stdlog,
2715 "displaced-stepping in-process while detaching");
2716
2717 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2718 inf->detaching = 1;
2719
2720 while (!ptid_equal (displaced->step_ptid, null_ptid))
2721 {
2722 struct cleanup *old_chain_2;
2723 struct execution_control_state ecss;
2724 struct execution_control_state *ecs;
2725
2726 ecs = &ecss;
2727 memset (ecs, 0, sizeof (*ecs));
2728
2729 overlay_cache_invalid = 1;
2730 /* Flush target cache before starting to handle each event.
2731 Target was running and cache could be stale. This is just a
2732 heuristic. Running threads may modify target memory, but we
2733 don't get any event. */
2734 target_dcache_invalidate ();
2735
2736 if (deprecated_target_wait_hook)
2737 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2738 else
2739 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2740
2741 if (debug_infrun)
2742 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2743
2744 /* If an error happens while handling the event, propagate GDB's
2745 knowledge of the executing state to the frontend/user running
2746 state. */
2747 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2748 &minus_one_ptid);
2749
2750 /* Now figure out what to do with the result. */
2751 handle_inferior_event (ecs);
2752
2753 /* No error, don't finish the state yet. */
2754 discard_cleanups (old_chain_2);
2755
2756 /* Breakpoints and watchpoints are not installed on the target
2757 at this point, and signals are passed directly to the
2758 inferior, so this must mean the process is gone. */
2759 if (!ecs->wait_some_more)
2760 {
2761 discard_cleanups (old_chain_1);
2762 error (_("Program exited while detaching"));
2763 }
2764 }
2765
2766 discard_cleanups (old_chain_1);
2767 }
2768
2769 /* Wait for control to return from inferior to debugger.
2770
2771 If inferior gets a signal, we may decide to start it up again
2772 instead of returning. That is why there is a loop in this function.
2773 When this function actually returns it means the inferior
2774 should be left stopped and GDB should read more commands. */
2775
2776 void
2777 wait_for_inferior (void)
2778 {
2779 struct cleanup *old_cleanups;
2780
2781 if (debug_infrun)
2782 fprintf_unfiltered
2783 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2784
2785 old_cleanups =
2786 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2787
2788 while (1)
2789 {
2790 struct execution_control_state ecss;
2791 struct execution_control_state *ecs = &ecss;
2792 struct cleanup *old_chain;
2793
2794 memset (ecs, 0, sizeof (*ecs));
2795
2796 overlay_cache_invalid = 1;
2797
2798 /* Flush target cache before starting to handle each event.
2799 Target was running and cache could be stale. This is just a
2800 heuristic. Running threads may modify target memory, but we
2801 don't get any event. */
2802 target_dcache_invalidate ();
2803
2804 if (deprecated_target_wait_hook)
2805 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2806 else
2807 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2808
2809 if (debug_infrun)
2810 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2811
2812 /* If an error happens while handling the event, propagate GDB's
2813 knowledge of the executing state to the frontend/user running
2814 state. */
2815 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2816
2817 /* Now figure out what to do with the result. */
2818 handle_inferior_event (ecs);
2819
2820 /* No error, don't finish the state yet. */
2821 discard_cleanups (old_chain);
2822
2823 if (!ecs->wait_some_more)
2824 break;
2825 }
2826
2827 do_cleanups (old_cleanups);
2828 }
2829
2830 /* Asynchronous version of wait_for_inferior. It is called by the
2831 event loop whenever a change of state is detected on the file
2832 descriptor corresponding to the target. It can be called more than
2833 once to complete a single execution command. In such cases we need
2834 to keep the run-control state across calls. If it is the last time
2835 that this function is called for a single execution command, then
2836 report to the user that the inferior has stopped, and do the
2837 necessary cleanups. */
2838
2839 void
2840 fetch_inferior_event (void *client_data)
2841 {
2842 struct execution_control_state ecss;
2843 struct execution_control_state *ecs = &ecss;
2844 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2845 struct cleanup *ts_old_chain;
2846 int was_sync = sync_execution;
2847 int cmd_done = 0;
2848
2849 memset (ecs, 0, sizeof (*ecs));
2850
2851 /* We're handling a live event, so make sure we're doing live
2852 debugging. If we're looking at traceframes while the target is
2853 running, we're going to need to get back to that mode after
2854 handling the event. */
2855 if (non_stop)
2856 {
2857 make_cleanup_restore_current_traceframe ();
2858 set_current_traceframe (-1);
2859 }
2860
2861 if (non_stop)
2862 /* In non-stop mode, the user/frontend should not notice a thread
2863 switch due to internal events. Make sure we reverse to the
2864 user selected thread and frame after handling the event and
2865 running any breakpoint commands. */
2866 make_cleanup_restore_current_thread ();
2867
2868 overlay_cache_invalid = 1;
2869 /* Flush target cache before starting to handle each event. Target
2870 was running and cache could be stale. This is just a heuristic.
2871 Running threads may modify target memory, but we don't get any
2872 event. */
2873 target_dcache_invalidate ();
2874
2875 make_cleanup_restore_integer (&execution_direction);
2876 execution_direction = target_execution_direction ();
2877
2878 if (deprecated_target_wait_hook)
2879 ecs->ptid =
2880 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2881 else
2882 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2883
2884 if (debug_infrun)
2885 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2886
2887 /* If an error happens while handling the event, propagate GDB's
2888 knowledge of the executing state to the frontend/user running
2889 state. */
2890 if (!non_stop)
2891 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2892 else
2893 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2894
2895 /* This cleanup runs before the make_cleanup_restore_current_thread cleanup
2896 above, so it still applies to the thread which has thrown the exception. */
2897 make_bpstat_clear_actions_cleanup ();
2898
2899 /* Now figure out what to do with the result. */
2900 handle_inferior_event (ecs);
2901
2902 if (!ecs->wait_some_more)
2903 {
2904 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2905
2906 delete_step_thread_step_resume_breakpoint ();
2907
2908 /* We may not find an inferior if this was a process exit. */
2909 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2910 normal_stop ();
2911
2912 if (target_has_execution
2913 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2914 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2915 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2916 && ecs->event_thread->step_multi
2917 && ecs->event_thread->control.stop_step)
2918 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2919 else
2920 {
2921 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2922 cmd_done = 1;
2923 }
2924 }
2925
2926 /* No error, don't finish the thread states yet. */
2927 discard_cleanups (ts_old_chain);
2928
2929 /* Revert thread and frame. */
2930 do_cleanups (old_chain);
2931
2932 /* If the inferior was in sync execution mode, and now isn't,
2933 restore the prompt (a synchronous execution command has finished,
2934 and we're ready for input). */
2935 if (interpreter_async && was_sync && !sync_execution)
2936 display_gdb_prompt (0);
2937
2938 if (cmd_done
2939 && !was_sync
2940 && exec_done_display_p
2941 && (ptid_equal (inferior_ptid, null_ptid)
2942 || !is_running (inferior_ptid)))
2943 printf_unfiltered (_("completed.\n"));
2944 }
2945
2946 /* Record the frame and location we're currently stepping through. */
2947 void
2948 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2949 {
2950 struct thread_info *tp = inferior_thread ();
2951
2952 tp->control.step_frame_id = get_frame_id (frame);
2953 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2954
2955 tp->current_symtab = sal.symtab;
2956 tp->current_line = sal.line;
2957 }
2958
2959 /* Clear context switchable stepping state. */
2960
2961 void
2962 init_thread_stepping_state (struct thread_info *tss)
2963 {
2964 tss->stepping_over_breakpoint = 0;
2965 tss->step_after_step_resume_breakpoint = 0;
2966 }
2967
2968 /* Set the cached copy of the last ptid/waitstatus. */
2969
2970 static void
2971 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
2972 {
2973 target_last_wait_ptid = ptid;
2974 target_last_waitstatus = status;
2975 }
2976
2977 /* Return the cached copy of the last pid/waitstatus returned by
2978 target_wait()/deprecated_target_wait_hook(). The data is actually
2979 cached by handle_inferior_event(), which gets called immediately
2980 after target_wait()/deprecated_target_wait_hook(). */
2981
2982 void
2983 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2984 {
2985 *ptidp = target_last_wait_ptid;
2986 *status = target_last_waitstatus;
2987 }
2988
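/* Forget the cached ptid of the last target_wait. */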
2989 void
2990 nullify_last_target_wait_ptid (void)
2991 {
2992 target_last_wait_ptid = minus_one_ptid;
2993 }
2994
2995 /* Switch thread contexts. */
2996
2997 static void
2998 context_switch (ptid_t ptid)
2999 {
3000 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3001 {
3002 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3003 target_pid_to_str (inferior_ptid));
3004 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3005 target_pid_to_str (ptid));
3006 }
3007
3008 switch_to_thread (ptid);
3009 }
3010
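/* If the target leaves the PC pointing after the breakpoint instruction
(decr_pc_after_break != 0) and the stop was caused by a software
breakpoint, rewind the event thread's PC back to the breakpoint
address. */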
3011 static void
3012 adjust_pc_after_break (struct execution_control_state *ecs)
3013 {
3014 struct regcache *regcache;
3015 struct gdbarch *gdbarch;
3016 struct address_space *aspace;
3017 CORE_ADDR breakpoint_pc, decr_pc;
3018
3019 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3020 we aren't, just return.
3021
3022 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3023 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3024 implemented by software breakpoints should be handled through the normal
3025 breakpoint layer.
3026
3027 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3028 different signals (SIGILL or SIGEMT for instance), but it is less
3029 clear where the PC is pointing afterwards. It may not match
3030 gdbarch_decr_pc_after_break. I don't know any specific target that
3031 generates these signals at breakpoints (the code has been in GDB since at
3032 least 1992) so I cannot guess how to handle them here.
3033
3034 In earlier versions of GDB, a target with
3035 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3036 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3037 target with both of these set in GDB history, and it seems unlikely to be
3038 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3039
3040 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3041 return;
3042
3043 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3044 return;
3045
3046 /* In reverse execution, when a breakpoint is hit, the instruction
3047 under it has already been de-executed. The reported PC always
3048 points at the breakpoint address, so adjusting it further would
3049 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3050 architecture:
3051
3052 B1 0x08000000 : INSN1
3053 B2 0x08000001 : INSN2
3054 0x08000002 : INSN3
3055 PC -> 0x08000003 : INSN4
3056
3057 Say you're stopped at 0x08000003 as above. Reverse continuing
3058 from that point should hit B2 as below. Reading the PC when the
3059 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3060 been de-executed already.
3061
3062 B1 0x08000000 : INSN1
3063 B2 PC -> 0x08000001 : INSN2
3064 0x08000002 : INSN3
3065 0x08000003 : INSN4
3066
3067 We can't apply the same logic as for forward execution, because
3068 we would wrongly adjust the PC to 0x08000000, since there's a
3069 breakpoint at PC - 1. We'd then report a hit on B1, although
3070 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3071 behaviour. */
3072 if (execution_direction == EXEC_REVERSE)
3073 return;
3074
3075 /* If this target does not decrement the PC after breakpoints, then
3076 we have nothing to do. */
3077 regcache = get_thread_regcache (ecs->ptid);
3078 gdbarch = get_regcache_arch (regcache);
3079
3080 decr_pc = target_decr_pc_after_break (gdbarch);
3081 if (decr_pc == 0)
3082 return;
3083
3084 aspace = get_regcache_aspace (regcache);
3085
3086 /* Find the location where (if we've hit a breakpoint) the
3087 breakpoint would be. */
3088 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
3089
3090 /* Check whether there actually is a software breakpoint inserted at
3091 that location.
3092
3093 If in non-stop mode, a race condition is possible where we've
3094 removed a breakpoint, but stop events for that breakpoint were
3095 already queued and arrive later. To suppress those spurious
3096 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3097 and retire them after a number of stop events are reported. */
3098 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3099 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3100 {
3101 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3102
3103 if (record_full_is_used ())
3104 record_full_gdb_operation_disable_set ();
3105
3106 /* When using hardware single-step, a SIGTRAP is reported for both
3107 a completed single-step and a software breakpoint. Need to
3108 differentiate between the two, as the latter needs adjusting
3109 but the former does not.
3110
3111 The SIGTRAP can be due to a completed hardware single-step only if
3112 - we didn't insert software single-step breakpoints
3113 - the thread to be examined is still the current thread
3114 - this thread is currently being stepped
3115
3116 If any of these events did not occur, we must have stopped due
3117 to hitting a software breakpoint, and have to back up to the
3118 breakpoint address.
3119
3120 As a special case, we could have hardware single-stepped a
3121 software breakpoint. In this case (prev_pc == breakpoint_pc),
3122 we also need to back up to the breakpoint address. */
3123
3124 if (singlestep_breakpoints_inserted_p
3125 || !ptid_equal (ecs->ptid, inferior_ptid)
3126 || !currently_stepping (ecs->event_thread)
3127 || ecs->event_thread->prev_pc == breakpoint_pc)
3128 regcache_write_pc (regcache, breakpoint_pc);
3129
3130 do_cleanups (old_cleanups);
3131 }
3132 }
3133
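/* Reset the inferior-wait state to its default values. */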
3134 static void
3135 init_infwait_state (void)
3136 {
3137 waiton_ptid = pid_to_ptid (-1);
3138 infwait_state = infwait_normal_state;
3139 }
3140
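/* Return non-zero if the frame with id STEP_FRAME_ID is a caller of
FRAME, looking outward only through inline frames and the first
non-inline caller. */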
3141 static int
3142 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3143 {
3144 for (frame = get_prev_frame (frame);
3145 frame != NULL;
3146 frame = get_prev_frame (frame))
3147 {
3148 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3149 return 1;
3150 if (get_frame_type (frame) != INLINE_FRAME)
3151 break;
3152 }
3153
3154 return 0;
3155 }
3156
3157 /* Auxiliary function that handles syscall entry/return events.
3158 It returns 1 if the inferior should keep going (and GDB
3159 should ignore the event), or 0 if the event deserves to be
3160 processed. */
3161
3162 static int
3163 handle_syscall_event (struct execution_control_state *ecs)
3164 {
3165 struct regcache *regcache;
3166 int syscall_number;
3167
3168 if (!ptid_equal (ecs->ptid, inferior_ptid))
3169 context_switch (ecs->ptid);
3170
3171 regcache = get_thread_regcache (ecs->ptid);
3172 syscall_number = ecs->ws.value.syscall_number;
3173 stop_pc = regcache_read_pc (regcache);
3174
3175 if (catch_syscall_enabled () > 0
3176 && catching_syscall_number (syscall_number) > 0)
3177 {
3178 if (debug_infrun)
3179 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3180 syscall_number);
3181
3182 ecs->event_thread->control.stop_bpstat
3183 = bpstat_stop_status (get_regcache_aspace (regcache),
3184 stop_pc, ecs->ptid, &ecs->ws);
3185
3186 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3187 {
3188 /* Catchpoint hit. */
3189 return 0;
3190 }
3191 }
3192
3193 /* If no catchpoint triggered for this, then keep going. */
3194 keep_going (ecs);
3195 return 1;
3196 }
3197
3198 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3199
3200 static void
3201 fill_in_stop_func (struct gdbarch *gdbarch,
3202 struct execution_control_state *ecs)
3203 {
3204 if (!ecs->stop_func_filled_in)
3205 {
3206 /* Don't care about return value; stop_func_start and stop_func_name
3207 will both be 0 if it doesn't work. */
3208 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3209 &ecs->stop_func_start, &ecs->stop_func_end);
3210 ecs->stop_func_start
3211 += gdbarch_deprecated_function_start_offset (gdbarch);
3212
3213 if (gdbarch_skip_entrypoint_p (gdbarch))
3214 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3215 ecs->stop_func_start);
3216
3217 ecs->stop_func_filled_in = 1;
3218 }
3219 }
3220
3221
3222 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3223
3224 static enum stop_kind
3225 get_inferior_stop_soon (ptid_t ptid)
3226 {
3227 struct inferior *inf = find_inferior_pid (ptid_get_pid (ptid));
3228
3229 gdb_assert (inf != NULL);
3230 return inf->control.stop_soon;
3231 }
3232
3233 /* Given an execution control state that has been freshly filled in by
3234 an event from the inferior, figure out what it means and take
3235 appropriate action.
3236
3237 The alternatives are:
3238
3239 1) stop_stepping and return; to really stop and return to the
3240 debugger.
3241
3242 2) keep_going and return; to wait for the next event (set
3243 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3244 once). */
3245
3246 static void
3247 handle_inferior_event (struct execution_control_state *ecs)
3248 {
3249 enum stop_kind stop_soon;
3250
3251 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3252 {
3253 /* We had an event in the inferior, but we are not interested in
3254 handling it at this level. The lower layers have already
3255 done what needs to be done, if anything.
3256
3257 One of the possible circumstances for this is when the
3258 inferior produces output for the console. The inferior has
3259 not stopped, and we are ignoring the event. Another possible
3260 circumstance is any event which the lower level knows will be
3261 reported multiple times without an intervening resume. */
3262 if (debug_infrun)
3263 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3264 prepare_to_wait (ecs);
3265 return;
3266 }
3267
3268 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3269 && target_can_async_p () && !sync_execution)
3270 {
3271 /* There were no unwaited-for children left in the target, but,
3272 we're not synchronously waiting for events either. Just
3273 ignore. Otherwise, if we were running a synchronous
3274 execution command, we need to cancel it and give the user
3275 back the terminal. */
3276 if (debug_infrun)
3277 fprintf_unfiltered (gdb_stdlog,
3278 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3279 prepare_to_wait (ecs);
3280 return;
3281 }
3282
3283 /* Cache the last pid/waitstatus. */
3284 set_last_target_status (ecs->ptid, ecs->ws);
3285
3286 /* Always clear state belonging to the previous time we stopped. */
3287 stop_stack_dummy = STOP_NONE;
3288
3289 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3290 {
3291 /* No unwaited-for children left. IOW, all resumed children
3292 have exited. */
3293 if (debug_infrun)
3294 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3295
3296 stop_print_frame = 0;
3297 stop_stepping (ecs);
3298 return;
3299 }
3300
3301 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3302 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3303 {
3304 ecs->event_thread = find_thread_ptid (ecs->ptid);
3305 /* If it's a new thread, add it to the thread database. */
3306 if (ecs->event_thread == NULL)
3307 ecs->event_thread = add_thread (ecs->ptid);
3308
3309 /* Disable range stepping. If the next step request could use a
3310 range, this will end up re-enabled then. */
3311 ecs->event_thread->control.may_range_step = 0;
3312 }
3313
3314 /* Dependent on valid ECS->EVENT_THREAD. */
3315 adjust_pc_after_break (ecs);
3316
3317 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3318 reinit_frame_cache ();
3319
3320 breakpoint_retire_moribund ();
3321
3322 /* First, distinguish signals caused by the debugger from signals
3323 that have to do with the program's own actions. Note that
3324 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3325 on the operating system version. Here we detect when a SIGILL or
3326 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3327 something similar for SIGSEGV, since a SIGSEGV will be generated
3328 when we're trying to execute a breakpoint instruction on a
3329 non-executable stack. This happens for call dummy breakpoints
3330 for architectures like SPARC that place call dummies on the
3331 stack. */
3332 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3333 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3334 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3335 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3336 {
3337 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3338
3339 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3340 regcache_read_pc (regcache)))
3341 {
3342 if (debug_infrun)
3343 fprintf_unfiltered (gdb_stdlog,
3344 "infrun: Treating signal as SIGTRAP\n");
3345 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3346 }
3347 }
3348
3349 /* Mark the non-executing threads accordingly. In all-stop, all
3350 threads of all processes are stopped when we get any event
3351 reported. In non-stop mode, only the event thread stops. If
3352 we're handling a process exit in non-stop mode, there's nothing
3353 to do, as threads of the dead process are gone, and threads of
3354 any other process were left running. */
3355 if (!non_stop)
3356 set_executing (minus_one_ptid, 0);
3357 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3358 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3359 set_executing (ecs->ptid, 0);
3360
3361 switch (infwait_state)
3362 {
3363 case infwait_normal_state:
3364 if (debug_infrun)
3365 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3366 break;
3367
3368 case infwait_step_watch_state:
3369 if (debug_infrun)
3370 fprintf_unfiltered (gdb_stdlog,
3371 "infrun: infwait_step_watch_state\n");
3372
3373 ecs->stepped_after_stopped_by_watchpoint = 1;
3374 break;
3375
3376 case infwait_nonstep_watch_state:
3377 if (debug_infrun)
3378 fprintf_unfiltered (gdb_stdlog,
3379 "infrun: infwait_nonstep_watch_state\n");
3380 insert_breakpoints ();
3381
3382 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3383 handle things like signals arriving and other things happening
3384 in combination correctly? */
3385 ecs->stepped_after_stopped_by_watchpoint = 1;
3386 break;
3387
3388 default:
3389 internal_error (__FILE__, __LINE__, _("bad switch"));
3390 }
3391
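/* Reset the wait state; a wait-on ptid of -1 means we are no longer
waiting for any particular thread. */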
3392 infwait_state = infwait_normal_state;
3393 waiton_ptid = pid_to_ptid (-1);
3394
3395 switch (ecs->ws.kind)
3396 {
3397 case TARGET_WAITKIND_LOADED:
3398 if (debug_infrun)
3399 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3400 if (!ptid_equal (ecs->ptid, inferior_ptid))
3401 context_switch (ecs->ptid);
3402 /* Ignore gracefully during startup of the inferior, as it might
3403 be the shell which has just loaded some objects; otherwise
3404 add the symbols for the newly loaded objects. Also ignore at
3405 the beginning of an attach or remote session; we will query
3406 the full list of libraries once the connection is
3407 established. */
3408
3409 stop_soon = get_inferior_stop_soon (ecs->ptid);
3410 if (stop_soon == NO_STOP_QUIETLY)
3411 {
3412 struct regcache *regcache;
3413
3414 regcache = get_thread_regcache (ecs->ptid);
3415
3416 handle_solib_event ();
3417
3418 ecs->event_thread->control.stop_bpstat
3419 = bpstat_stop_status (get_regcache_aspace (regcache),
3420 stop_pc, ecs->ptid, &ecs->ws);
3421
3422 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3423 {
3424 /* A catchpoint triggered. */
3425 process_event_stop_test (ecs);
3426 return;
3427 }
3428
3429 /* If requested, stop when the dynamic linker notifies
3430 gdb of events. This allows the user to get control
3431 and place breakpoints in initializer routines for
3432 dynamically loaded objects (among other things). */
3433 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3434 if (stop_on_solib_events)
3435 {
3436 /* Make sure we print "Stopped due to solib-event" in
3437 normal_stop. */
3438 stop_print_frame = 1;
3439
3440 stop_stepping (ecs);
3441 return;
3442 }
3443 }
3444
3445 /* If we are skipping through a shell, or through shared library
3446 loading that we aren't interested in, resume the program. If
3447 we're running the program normally, also resume. */
3448 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3449 {
3450 /* Loading of shared libraries might have changed breakpoint
3451 addresses. Make sure new breakpoints are inserted. */
3452 if (stop_soon == NO_STOP_QUIETLY
3453 && !breakpoints_always_inserted_mode ())
3454 insert_breakpoints ();
3455 resume (0, GDB_SIGNAL_0);
3456 prepare_to_wait (ecs);
3457 return;
3458 }
3459
3460 /* But stop if we're attaching or setting up a remote
3461 connection. */
3462 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3463 || stop_soon == STOP_QUIETLY_REMOTE)
3464 {
3465 if (debug_infrun)
3466 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3467 stop_stepping (ecs);
3468 return;
3469 }
3470
3471 internal_error (__FILE__, __LINE__,
3472 _("unhandled stop_soon: %d"), (int) stop_soon);
3473
3474 case TARGET_WAITKIND_SPURIOUS:
3475 if (debug_infrun)
3476 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3477 if (!ptid_equal (ecs->ptid, inferior_ptid))
3478 context_switch (ecs->ptid);
3479 resume (0, GDB_SIGNAL_0);
3480 prepare_to_wait (ecs);
3481 return;
3482
3483 case TARGET_WAITKIND_EXITED:
3484 case TARGET_WAITKIND_SIGNALLED:
3485 if (debug_infrun)
3486 {
3487 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3488 fprintf_unfiltered (gdb_stdlog,
3489 "infrun: TARGET_WAITKIND_EXITED\n");
3490 else
3491 fprintf_unfiltered (gdb_stdlog,
3492 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3493 }
3494
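/* Make the exited (or signalled) inferior and its program space
current, so the bookkeeping below applies to the right inferior. */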
3495 inferior_ptid = ecs->ptid;
3496 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3497 set_current_program_space (current_inferior ()->pspace);
3498 handle_vfork_child_exec_or_exit (0);
3499 target_terminal_ours (); /* Must do this before mourn anyway. */
3500
3501 /* Clear any previous state of the exit-related convenience variables. */
3502 clear_exit_convenience_vars ();
3503
3504 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3505 {
3506 /* Record the exit code in the convenience variable $_exitcode, so
3507 that the user can inspect this again later. */
3508 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3509 (LONGEST) ecs->ws.value.integer);
3510
3511 /* Also record this in the inferior itself. */
3512 current_inferior ()->has_exit_code = 1;
3513 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3514
3515 print_exited_reason (ecs->ws.value.integer);
3516 }
3517 else
3518 {
3519 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3520 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3521
3522 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3523 {
3524 /* Set the value of the internal variable $_exitsignal,
3525 which holds the signal uncaught by the inferior. */
3526 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3527 gdbarch_gdb_signal_to_target (gdbarch,
3528 ecs->ws.value.sig));
3529 }
3530 else
3531 {
3532 /* We don't have access to the target's method used for
3533 converting between signal numbers (GDB's internal
3534 representation <-> target's representation).
3535 Therefore, we cannot do a good job at displaying this
3536 information to the user. It's better to just warn
3537 her about it (if infrun debugging is enabled), and
3538 give up. */
3539 if (debug_infrun)
3540 fprintf_unfiltered (gdb_stdlog, _("\
3541 Cannot fill $_exitsignal with the correct signal number.\n"));
3542 }
3543
3544 print_signal_exited_reason (ecs->ws.value.sig);
3545 }
3546
3547 gdb_flush (gdb_stdout);
3548 target_mourn_inferior ();
3549 singlestep_breakpoints_inserted_p = 0;
3550 cancel_single_step_breakpoints ();
3551 stop_print_frame = 0;
3552 stop_stepping (ecs);
3553 return;
3554
3555 /* The following are the only cases in which we keep going;
3556 the above cases end in a continue or goto. */
3557 case TARGET_WAITKIND_FORKED:
3558 case TARGET_WAITKIND_VFORKED:
3559 if (debug_infrun)
3560 {
3561 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3562 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3563 else
3564 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3565 }
3566
3567 /* Check whether the inferior is displaced stepping. */
3568 {
3569 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3570 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3571 struct displaced_step_inferior_state *displaced
3572 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3573
3574 /* Check whether displaced stepping is in use, and whether thread
3575 ecs->ptid is the thread that was displaced stepping. */
3576 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3577 {
3578 struct inferior *parent_inf
3579 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3580 struct regcache *child_regcache;
3581 CORE_ADDR parent_pc;
3582
3583 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3584 indicating that the displaced stepping of syscall instruction
3585 has been done. Perform cleanup for parent process here. Note
3586 that this operation also cleans up the child process for vfork,
3587 because their pages are shared. */
3588 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3589
3590 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3591 {
3592 /* Restore scratch pad for child process. */
3593 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3594 }
3595
3596 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3597 the child's PC is also within the scratchpad. Set the child's PC
3598 to the parent's PC value, which has already been fixed up.
3599 FIXME: we use the parent's aspace here, although we're touching
3600 the child, because the child hasn't been added to the inferior
3601 list yet at this point. */
3602
3603 child_regcache
3604 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3605 gdbarch,
3606 parent_inf->aspace);
3607 /* Read PC value of parent process. */
3608 parent_pc = regcache_read_pc (regcache);
3609
3610 if (debug_displaced)
3611 fprintf_unfiltered (gdb_stdlog,
3612 "displaced: write child pc from %s to %s\n",
3613 paddress (gdbarch,
3614 regcache_read_pc (child_regcache)),
3615 paddress (gdbarch, parent_pc));
3616
3617 regcache_write_pc (child_regcache, parent_pc);
3618 }
3619 }
3620
3621 if (!ptid_equal (ecs->ptid, inferior_ptid))
3622 context_switch (ecs->ptid);
3623
3624 /* Immediately detach breakpoints from the child before there's
3625 any chance of letting the user delete breakpoints from the
3626 breakpoint lists. If we don't do this early, it's easy to
3627 leave left-over traps in the child, viz: "break foo; catch
3628 fork; c; <fork>; del; c; <child calls foo>". We only follow
3629 the fork on the last `continue', and by that time the
3630 breakpoint at "foo" is long gone from the breakpoint table.
3631 If we vforked, then we don't need to unpatch here, since both
3632 parent and child are sharing the same memory pages; we'll
3633 need to unpatch at follow/detach time instead to be certain
3634 that new breakpoints added between catchpoint hit time and
3635 vfork follow are detached. */
3636 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3637 {
3638 /* This won't actually modify the breakpoint list, but will
3639 physically remove the breakpoints from the child. */
3640 detach_breakpoints (ecs->ws.value.related_pid);
3641 }
3642
3643 if (singlestep_breakpoints_inserted_p)
3644 {
3645 /* Pull the single step breakpoints out of the target. */
3646 remove_single_step_breakpoints ();
3647 singlestep_breakpoints_inserted_p = 0;
3648 }
3649
3650 /* In case the event is caught by a catchpoint, remember that
3651 the event is to be followed at the next resume of the thread,
3652 and not immediately. */
3653 ecs->event_thread->pending_follow = ecs->ws;
3654
3655 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3656
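/* Ask the breakpoints module whether a fork/vfork catchpoint
applies to this event. */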
3657 ecs->event_thread->control.stop_bpstat
3658 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3659 stop_pc, ecs->ptid, &ecs->ws);
3660
3661 /* If no catchpoint triggered for this, then keep going. Note
3662 that we're interested in knowing the bpstat actually causes a
3663 stop, not just if it may explain the signal. Software
3664 watchpoints, for example, always appear in the bpstat. */
3665 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3666 {
3667 ptid_t parent;
3668 ptid_t child;
3669 int should_resume;
3670 int follow_child
3671 = (follow_fork_mode_string == follow_fork_mode_child);
3672
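/* The fork event itself is not a signal to report; clear the stop
signal so nothing is passed to the inferior when it is resumed. */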
3673 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3674
3675 should_resume = follow_fork ();
3676
3677 parent = ecs->ptid;
3678 child = ecs->ws.value.related_pid;
3679
3680 /* In non-stop mode, also resume the other branch. */
3681 if (non_stop && !detach_fork)
3682 {
3683 if (follow_child)
3684 switch_to_thread (parent);
3685 else
3686 switch_to_thread (child);
3687
3688 ecs->event_thread = inferior_thread ();
3689 ecs->ptid = inferior_ptid;
3690 keep_going (ecs);
3691 }
3692
3693 if (follow_child)
3694 switch_to_thread (child);
3695 else
3696 switch_to_thread (parent);
3697
3698 ecs->event_thread = inferior_thread ();
3699 ecs->ptid = inferior_ptid;
3700
3701 if (should_resume)
3702 keep_going (ecs);
3703 else
3704 stop_stepping (ecs);
3705 return;
3706 }
3707 process_event_stop_test (ecs);
3708 return;
3709
3710 case TARGET_WAITKIND_VFORK_DONE:
3711 /* Done with the shared memory region. Re-insert breakpoints in
3712 the parent, and keep going. */
3713
3714 if (debug_infrun)
3715 fprintf_unfiltered (gdb_stdlog,
3716 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3717
3718 if (!ptid_equal (ecs->ptid, inferior_ptid))
3719 context_switch (ecs->ptid);
3720
3721 current_inferior ()->waiting_for_vfork_done = 0;
3722 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3723 /* This also takes care of reinserting breakpoints in the
3724 previously locked inferior. */
3725 keep_going (ecs);
3726 return;
3727
3728 case TARGET_WAITKIND_EXECD:
3729 if (debug_infrun)
3730 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3731
3732 if (!ptid_equal (ecs->ptid, inferior_ptid))
3733 context_switch (ecs->ptid);
3734
3735 singlestep_breakpoints_inserted_p = 0;
3736 cancel_single_step_breakpoints ();
3737
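/* Record the PC at which the exec event was reported; the
catchpoint check below uses it. */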
3738 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3739
3740 /* Do whatever is necessary to the parent branch of the vfork. */
3741 handle_vfork_child_exec_or_exit (1);
3742
3743 /* This causes the eventpoints and symbol table to be reset.
3744 Must do this now, before trying to determine whether to
3745 stop. */
3746 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3747
3748 ecs->event_thread->control.stop_bpstat
3749 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3750 stop_pc, ecs->ptid, &ecs->ws);
3751
3752 /* Note that this may be referenced from inside
3753 bpstat_stop_status above, through inferior_has_execd. */
3754 xfree (ecs->ws.value.execd_pathname);
3755 ecs->ws.value.execd_pathname = NULL;
3756
3757 /* If no catchpoint triggered for this, then keep going. */
3758 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3759 {
3760 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3761 keep_going (ecs);
3762 return;
3763 }
3764 process_event_stop_test (ecs);
3765 return;
3766
3767 /* Be careful not to try to gather much state about a thread
3768 that's in a syscall. It's frequently a losing proposition. */
3769 case TARGET_WAITKIND_SYSCALL_ENTRY:
3770 if (debug_infrun)
3771 fprintf_unfiltered (gdb_stdlog,
3772 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3773 /* Getting the current syscall number. */
3774 if (handle_syscall_event (ecs) == 0)
3775 process_event_stop_test (ecs);
3776 return;
3777
3778 /* Before examining the threads further, step this thread to
3779 get it entirely out of the syscall. (We get notice of the
3780 event when the thread is just on the verge of exiting a
3781 syscall. Stepping one instruction seems to get it back
3782 into user code.) */
3783 case TARGET_WAITKIND_SYSCALL_RETURN:
3784 if (debug_infrun)
3785 fprintf_unfiltered (gdb_stdlog,
3786 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3787 if (handle_syscall_event (ecs) == 0)
3788 process_event_stop_test (ecs);
3789 return;
3790
3791 case TARGET_WAITKIND_STOPPED:
3792 if (debug_infrun)
3793 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3794 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3795 handle_signal_stop (ecs);
3796 return;
3797
3798 case TARGET_WAITKIND_NO_HISTORY:
3799 if (debug_infrun)
3800 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3801 /* Reverse execution: target ran out of history info. */
3802
3803 /* Pull the single step breakpoints out of the target. */
3804 if (singlestep_breakpoints_inserted_p)
3805 {
3806 if (!ptid_equal (ecs->ptid, inferior_ptid))
3807 context_switch (ecs->ptid);
3808 remove_single_step_breakpoints ();
3809 singlestep_breakpoints_inserted_p = 0;
3810 }
3811 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3812 print_no_history_reason ();
3813 stop_stepping (ecs);
3814 return;
3815 }
3816 }
3817
3818 /* Come here when the program has stopped with a signal. */
3819
3820 static void
3821 handle_signal_stop (struct execution_control_state *ecs)
3822 {
3823 struct frame_info *frame;
3824 struct gdbarch *gdbarch;
3825 int stopped_by_watchpoint;
3826 enum stop_kind stop_soon;
3827 int random_signal;
3828
3829 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
3830
3831 /* Do we need to clean up the state of a thread that has
3832 completed a displaced single-step? (Doing so usually affects
3833 the PC, so do it here, before we set stop_pc.) */
3834 displaced_step_fixup (ecs->ptid,
3835 ecs->event_thread->suspend.stop_signal);
3836
3837 /* If we either finished a single-step or hit a breakpoint, but
3838 the user wanted this thread to be stopped, pretend we got a
3839 SIG0 (generic unsignaled stop). */
3840 if (ecs->event_thread->stop_requested
3841 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3842 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3843
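/* Record where the thread stopped; most of the logic below keys
off of STOP_PC. */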
3844 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3845
3846 if (debug_infrun)
3847 {
3848 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3849 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3850 struct cleanup *old_chain = save_inferior_ptid ();
3851
3852 inferior_ptid = ecs->ptid;
3853
3854 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3855 paddress (gdbarch, stop_pc));
3856 if (target_stopped_by_watchpoint ())
3857 {
3858 CORE_ADDR addr;
3859
3860 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3861
3862 if (target_stopped_data_address (&current_target, &addr))
3863 fprintf_unfiltered (gdb_stdlog,
3864 "infrun: stopped data address = %s\n",
3865 paddress (gdbarch, addr));
3866 else
3867 fprintf_unfiltered (gdb_stdlog,
3868 "infrun: (no data address available)\n");
3869 }
3870
3871 do_cleanups (old_chain);
3872 }
3873
3874 /* This originates from start_remote(), start_inferior() and
3875 the shared library hook functions. */
3876 stop_soon = get_inferior_stop_soon (ecs->ptid);
3877 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3878 {
3879 if (!ptid_equal (ecs->ptid, inferior_ptid))
3880 context_switch (ecs->ptid);
3881 if (debug_infrun)
3882 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3883 stop_print_frame = 1;
3884 stop_stepping (ecs);
3885 return;
3886 }
3887
3888 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3889 && stop_after_trap)
3890 {
3891 if (!ptid_equal (ecs->ptid, inferior_ptid))
3892 context_switch (ecs->ptid);
3893 if (debug_infrun)
3894 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3895 stop_print_frame = 0;
3896 stop_stepping (ecs);
3897 return;
3898 }
3899
3900 /* This originates from attach_command(). We need to overwrite
3901 the stop_signal here, because some kernels don't ignore a
3902 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3903 See more comments in inferior.h. On the other hand, if we
3904 get a non-SIGSTOP, report it to the user - assume the backend
3905 will handle the SIGSTOP if it should show up later.
3906
3907 Also consider that the attach is complete when we see a
3908 SIGTRAP. Some systems (e.g. Windows) and stubs supporting
3909 target extended-remote report it instead of a SIGSTOP
3910 (e.g. gdbserver). We already rely on SIGTRAP being our
3911 signal, so this is no exception.
3912
3913 Also consider that the attach is complete when we see a
3914 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3915 the target to stop all threads of the inferior, in case the
3916 low level attach operation doesn't stop them implicitly. If
3917 they weren't stopped implicitly, then the stub will report a
3918 GDB_SIGNAL_0, meaning: stopped for no particular reason
3919 other than GDB's request. */
3920 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3921 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
3922 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3923 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
3924 {
3925 stop_print_frame = 1;
3926 stop_stepping (ecs);
3927 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3928 return;
3929 }
3930
3931 /* See if something interesting happened to the non-current thread. If
3932 so, then switch to that thread. */
3933 if (!ptid_equal (ecs->ptid, inferior_ptid))
3934 {
3935 if (debug_infrun)
3936 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3937
3938 context_switch (ecs->ptid);
3939
3940 if (deprecated_context_hook)
3941 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3942 }
3943
3944 /* At this point, get hold of the now-current thread's frame. */
3945 frame = get_current_frame ();
3946 gdbarch = get_frame_arch (frame);
3947
3948 /* Pull the single step breakpoints out of the target. */
3949 if (singlestep_breakpoints_inserted_p)
3950 {
3951 /* However, before doing so, if this single-step breakpoint was
3952 actually for another thread, set this thread up for moving
3953 past it. */
3954 if (!ptid_equal (ecs->ptid, singlestep_ptid)
3955 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3956 {
3957 struct regcache *regcache;
3958 struct address_space *aspace;
3959 CORE_ADDR pc;
3960
3961 regcache = get_thread_regcache (ecs->ptid);
3962 aspace = get_regcache_aspace (regcache);
3963 pc = regcache_read_pc (regcache);
3964 if (single_step_breakpoint_inserted_here_p (aspace, pc))
3965 {
3966 if (debug_infrun)
3967 {
3968 fprintf_unfiltered (gdb_stdlog,
3969 "infrun: [%s] hit step over single-step"
3970 " breakpoint of [%s]\n",
3971 target_pid_to_str (ecs->ptid),
3972 target_pid_to_str (singlestep_ptid));
3973 }
3974 ecs->hit_singlestep_breakpoint = 1;
3975 }
3976 }
3977
3978 remove_single_step_breakpoints ();
3979 singlestep_breakpoints_inserted_p = 0;
3980 }
3981
3982 if (ecs->stepped_after_stopped_by_watchpoint)
3983 stopped_by_watchpoint = 0;
3984 else
3985 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3986
3987 /* If necessary, step over this watchpoint. We'll be back to display
3988 it in a moment. */
3989 if (stopped_by_watchpoint
3990 && (target_have_steppable_watchpoint
3991 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3992 {
3993 /* At this point, we are stopped at an instruction which has
3994 attempted to write to a piece of memory under control of
3995 a watchpoint. The instruction hasn't actually executed
3996 yet. If we were to evaluate the watchpoint expression
3997 now, we would get the old value, and therefore no change
3998 would seem to have occurred.
3999
4000 In order to make watchpoints work `right', we really need
4001 to complete the memory write, and then evaluate the
4002 watchpoint expression. We do this by single-stepping the
4003 target.
4004
4005 It may not be necessary to disable the watchpoint to stop over
4006 it. For example, the PA can (with some kernel cooperation)
4007 single step over a watchpoint without disabling the watchpoint.
4008
4009 It is far more common to need to disable a watchpoint to step
4010 the inferior over it. If we have non-steppable watchpoints,
4011 we must disable the current watchpoint; it's simplest to
4012 disable all watchpoints and breakpoints. */
4013 int hw_step = 1;
4014
4015 if (!target_have_steppable_watchpoint)
4016 {
4017 remove_breakpoints ();
4018 /* See comment in resume why we need to stop bypassing signals
4019 while breakpoints have been removed. */
4020 target_pass_signals (0, NULL);
4021 }
4022 /* Single step, using software single-step breakpoints if the architecture provides them. */
4023 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4024 target_resume (ecs->ptid, hw_step, GDB_SIGNAL_0);
4025 waiton_ptid = ecs->ptid;
4026 if (target_have_steppable_watchpoint)
4027 infwait_state = infwait_step_watch_state;
4028 else
4029 infwait_state = infwait_nonstep_watch_state;
4030 prepare_to_wait (ecs);
4031 return;
4032 }
4033
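/* Reset the per-stop state before deciding whether this stop
should be reported to the user. */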
4034 ecs->event_thread->stepping_over_breakpoint = 0;
4035 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4036 ecs->event_thread->control.stop_step = 0;
4037 stop_print_frame = 1;
4038 stopped_by_random_signal = 0;
4039
4040 /* Hide inlined functions starting here, unless we just performed stepi or
4041 nexti. After stepi and nexti, always show the innermost frame (not any
4042 inline function call sites). */
4043 if (ecs->event_thread->control.step_range_end != 1)
4044 {
4045 struct address_space *aspace =
4046 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4047
4048 /* skip_inline_frames is expensive, so we avoid it if we can
4049 determine that the address is one where functions cannot have
4050 been inlined. This improves performance with inferiors that
4051 load a lot of shared libraries, because the solib event
4052 breakpoint is defined as the address of a function (i.e. not
4053 inline). Note that we have to check the previous PC as well
4054 as the current one to catch cases when we have just
4055 single-stepped off a breakpoint prior to reinstating it.
4056 Note that we're assuming that the code we single-step to is
4057 not inline, but that's not definitive: there's nothing
4058 preventing the event breakpoint function from containing
4059 inlined code, and the single-step ending up there. If the
4060 user had set a breakpoint on that inlined code, the missing
4061 skip_inline_frames call would break things. Fortunately
4062 that's an extremely unlikely scenario. */
4063 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4064 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4065 && ecs->event_thread->control.trap_expected
4066 && pc_at_non_inline_function (aspace,
4067 ecs->event_thread->prev_pc,
4068 &ecs->ws)))
4069 {
4070 skip_inline_frames (ecs->ptid);
4071
4072 /* Re-fetch current thread's frame in case that invalidated
4073 the frame cache. */
4074 frame = get_current_frame ();
4075 gdbarch = get_frame_arch (frame);
4076 }
4077 }
4078
4079 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4080 && ecs->event_thread->control.trap_expected
4081 && gdbarch_single_step_through_delay_p (gdbarch)
4082 && currently_stepping (ecs->event_thread))
4083 {
4084 /* We're trying to step off a breakpoint. Turns out that we're
4085 also on an instruction that needs to be stepped multiple
4086 times before it's been fully executed. E.g., architectures
4087 with a delay slot. It needs to be stepped twice, once for
4088 the instruction and once for the delay slot. */
4089 int step_through_delay
4090 = gdbarch_single_step_through_delay (gdbarch, frame);
4091
4092 if (debug_infrun && step_through_delay)
4093 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4094 if (ecs->event_thread->control.step_range_end == 0
4095 && step_through_delay)
4096 {
4097 /* The user issued a continue when stopped at a breakpoint.
4098 Set up for another trap and get out of here. */
4099 ecs->event_thread->stepping_over_breakpoint = 1;
4100 keep_going (ecs);
4101 return;
4102 }
4103 else if (step_through_delay)
4104 {
4105 /* The user issued a step when stopped at a breakpoint.
4106 Maybe we should stop, maybe we should not - the delay
4107 slot *might* correspond to a line of source. In any
4108 case, don't decide that here, just set
4109 ecs->stepping_over_breakpoint, making sure we
4110 single-step again before breakpoints are re-inserted. */
4111 ecs->event_thread->stepping_over_breakpoint = 1;
4112 }
4113 }
4114
4115 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4116 handles this event. */
4117 ecs->event_thread->control.stop_bpstat
4118 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4119 stop_pc, ecs->ptid, &ecs->ws);
4120
4121 /* The following is needed in case a breakpoint condition called a
4122 function. */
4123 stop_print_frame = 1;
4124
4125 /* This is where we handle "moribund" watchpoints. Unlike
4126 software breakpoints traps, hardware watchpoint traps are
4127 always distinguishable from random traps. If no high-level
4128 watchpoint is associated with the reported stop data address
4129 anymore, then the bpstat does not explain the signal ---
4130 simply make sure to ignore it if `stopped_by_watchpoint' is
4131 set. */
4132
4133 if (debug_infrun
4134 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4135 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4136 GDB_SIGNAL_TRAP)
4137 && stopped_by_watchpoint)
4138 fprintf_unfiltered (gdb_stdlog,
4139 "infrun: no user watchpoint explains "
4140 "watchpoint SIGTRAP, ignoring\n");
4141
4142 /* NOTE: cagney/2003-03-29: These checks for a random signal
4143 at one stage in the past included checks for an inferior
4144 function call's call dummy's return breakpoint. The original
4145 comment, that went with the test, read:
4146
4147 ``End of a stack dummy. Some systems (e.g. Sony news) give
4148 another signal besides SIGTRAP, so check here as well as
4149 above.''
4150
4151 If someone ever tries to get call dummies on a
4152 non-executable stack to work (where the target would stop
4153 with something like a SIGSEGV), then those tests might need
4154 to be re-instated. Given, however, that the tests were only
4155 enabled when momentary breakpoints were not being used, I
4156 suspect that it won't be the case.
4157
4158 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4159 be necessary for call dummies on a non-executable stack on
4160 SPARC. */
4161
4162 /* See if the breakpoints module can explain the signal. */
4163 random_signal
4164 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4165 ecs->event_thread->suspend.stop_signal);
4166
4167 /* If not, perhaps stepping/nexting can. */
4168 if (random_signal)
4169 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4170 && currently_stepping (ecs->event_thread));
4171
4172 /* Perhaps the thread hit a single-step breakpoint of _another_
4173 thread. Single-step breakpoints are transparent to the
4174 breakpoints module. */
4175 if (random_signal)
4176 random_signal = !ecs->hit_singlestep_breakpoint;
4177
4178 /* No? Perhaps we got a moribund watchpoint. */
4179 if (random_signal)
4180 random_signal = !stopped_by_watchpoint;
4181
4182 /* For the program's own signals, act according to
4183 the signal handling tables. */
4184
4185 if (random_signal)
4186 {
4187 /* Signal not for debugging purposes. */
4188 int printed = 0;
4189 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4190 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4191
4192 if (debug_infrun)
4193 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4194 gdb_signal_to_symbol_string (stop_signal));
4195
4196 stopped_by_random_signal = 1;
4197
4198 if (signal_print[ecs->event_thread->suspend.stop_signal])
4199 {
4200 printed = 1;
4201 target_terminal_ours_for_output ();
4202 print_signal_received_reason
4203 (ecs->event_thread->suspend.stop_signal);
4204 }
4205 /* Always stop on signals if we're either just gaining control
4206 of the program, or the user explicitly requested this thread
4207 to remain stopped. */
4208 if (stop_soon != NO_STOP_QUIETLY
4209 || ecs->event_thread->stop_requested
4210 || (!inf->detaching
4211 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4212 {
4213 stop_stepping (ecs);
4214 return;
4215 }
4216 /* If not going to stop, give terminal back
4217 if we took it away. */
4218 else if (printed)
4219 target_terminal_inferior ();
4220
4221 /* Clear the signal if it should not be passed. */
4222 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4223 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4224
4225 if (ecs->event_thread->prev_pc == stop_pc
4226 && ecs->event_thread->control.trap_expected
4227 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4228 {
4229 /* We were just starting a new sequence, attempting to
4230 single-step off of a breakpoint and expecting a SIGTRAP.
4231 Instead this signal arrives. This signal will take us out
4232 of the stepping range so GDB needs to remember to, when
4233 the signal handler returns, resume stepping off that
4234 breakpoint. */
4235 /* To simplify things, "continue" is forced to use the same
4236 code paths as single-step - set a breakpoint at the
4237 signal return address and then, once hit, step off that
4238 breakpoint. */
4239 if (debug_infrun)
4240 fprintf_unfiltered (gdb_stdlog,
4241 "infrun: signal arrived while stepping over "
4242 "breakpoint\n");
4243
4244 insert_hp_step_resume_breakpoint_at_frame (frame);
4245 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4246 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4247 ecs->event_thread->control.trap_expected = 0;
4248
4249 /* If we were nexting/stepping some other thread, switch to
4250 it, so that we don't continue it, losing control. */
4251 if (!switch_back_to_stepped_thread (ecs))
4252 keep_going (ecs);
4253 return;
4254 }
4255
4256 if (ecs->event_thread->control.step_range_end != 0
4257 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4258 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4259 && frame_id_eq (get_stack_frame_id (frame),
4260 ecs->event_thread->control.step_stack_frame_id)
4261 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4262 {
4263 /* The inferior is about to take a signal that will take it
4264 out of the single step range. Set a breakpoint at the
4265 current PC (which is presumably where the signal handler
4266 will eventually return) and then allow the inferior to
4267 run free.
4268
4269 Note that this is only needed for a signal delivered
4270 while in the single-step range. Nested signals aren't a
4271 problem as they eventually all return. */
4272 if (debug_infrun)
4273 fprintf_unfiltered (gdb_stdlog,
4274 "infrun: signal may take us out of "
4275 "single-step range\n");
4276
4277 insert_hp_step_resume_breakpoint_at_frame (frame);
4278 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4279 ecs->event_thread->control.trap_expected = 0;
4280 keep_going (ecs);
4281 return;
4282 }
4283
4284 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4285 when either there's a nested signal, or when there's a
4286 pending signal enabled just as the signal handler returns
4287 (leaving the inferior at the step-resume-breakpoint without
4288 actually executing it). Either way continue until the
4289 breakpoint is really hit. */
4290
4291 if (!switch_back_to_stepped_thread (ecs))
4292 {
4293 if (debug_infrun)
4294 fprintf_unfiltered (gdb_stdlog,
4295 "infrun: random signal, keep going\n");
4296
4297 keep_going (ecs);
4298 }
4299 return;
4300 }
4301
4302 process_event_stop_test (ecs);
4303 }
4304
4305 /* Come here when we've got some debug event / signal we can explain
4306 (IOW, not a random signal), and test whether it should cause a
4307 stop, or whether we should resume the inferior (transparently).
4308 E.g., could be a breakpoint whose condition evaluates false; we
4309 could be still stepping within the line; etc. */
4310
4311 static void
4312 process_event_stop_test (struct execution_control_state *ecs)
4313 {
4314 struct symtab_and_line stop_pc_sal;
4315 struct frame_info *frame;
4316 struct gdbarch *gdbarch;
4317 CORE_ADDR jmp_buf_pc;
4318 struct bpstat_what what;
4319
4320 /* Handle cases caused by hitting a breakpoint. */
4321
4322 frame = get_current_frame ();
4323 gdbarch = get_frame_arch (frame);
4324
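/* Ask the breakpoints module what to do about this stop: the main
action to take, plus whether a call dummy breakpoint was hit. */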
4325 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4326
4327 if (what.call_dummy)
4328 {
4329 stop_stack_dummy = what.call_dummy;
4330 }
4331
4332 /* If we hit an internal event that triggers symbol changes, the
4333 current frame will be invalidated within bpstat_what (e.g., if we
4334 hit an internal solib event). Re-fetch it. */
4335 frame = get_current_frame ();
4336 gdbarch = get_frame_arch (frame);
4337
4338 switch (what.main_action)
4339 {
4340 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4341 /* If we hit the breakpoint at longjmp while stepping, we
4342 install a momentary breakpoint at the target of the
4343 jmp_buf. */
4344
4345 if (debug_infrun)
4346 fprintf_unfiltered (gdb_stdlog,
4347 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4348
4349 ecs->event_thread->stepping_over_breakpoint = 1;
4350
4351 if (what.is_longjmp)
4352 {
4353 struct value *arg_value;
4354
4355 /* If we set the longjmp breakpoint via a SystemTap probe,
4356 then use it to extract the arguments. The destination PC
4357 is the third argument to the probe (zero-based index 2). */
4358 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4359 if (arg_value)
4360 jmp_buf_pc = value_as_address (arg_value);
4361 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4362 || !gdbarch_get_longjmp_target (gdbarch,
4363 frame, &jmp_buf_pc))
4364 {
4365 if (debug_infrun)
4366 fprintf_unfiltered (gdb_stdlog,
4367 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4368 "(!gdbarch_get_longjmp_target)\n");
4369 keep_going (ecs);
4370 return;
4371 }
4372
4373 /* Insert a breakpoint at resume address. */
4374 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4375 }
4376 else
4377 check_exception_resume (ecs, frame);
4378 keep_going (ecs);
4379 return;
4380
4381 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4382 {
4383 struct frame_info *init_frame;
4384
4385 /* There are several cases to consider.
4386
4387 1. The initiating frame no longer exists. In this case we
4388 must stop, because the exception or longjmp has gone too
4389 far.
4390
4391 2. The initiating frame exists, and is the same as the
4392 current frame. We stop, because the exception or longjmp
4393 has been caught.
4394
4395 3. The initiating frame exists and is different from the
4396 current frame. This means the exception or longjmp has
4397 been caught beneath the initiating frame, so keep going.
4398
4399 4. The longjmp breakpoint has been placed just to protect
4400 against stale dummy frames, and the user is not interested in
4401 stopping around longjmps. */
4402
4403 if (debug_infrun)
4404 fprintf_unfiltered (gdb_stdlog,
4405 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4406
4407 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4408 != NULL);
4409 delete_exception_resume_breakpoint (ecs->event_thread);
4410
4411 if (what.is_longjmp)
4412 {
4413 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread->num);
4414
4415 if (!frame_id_p (ecs->event_thread->initiating_frame))
4416 {
4417 /* Case 4. */
4418 keep_going (ecs);
4419 return;
4420 }
4421 }
4422
4423 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4424
4425 if (init_frame)
4426 {
4427 struct frame_id current_id
4428 = get_frame_id (get_current_frame ());
4429 if (frame_id_eq (current_id,
4430 ecs->event_thread->initiating_frame))
4431 {
4432 /* Case 2. Fall through. */
4433 }
4434 else
4435 {
4436 /* Case 3. */
4437 keep_going (ecs);
4438 return;
4439 }
4440 }
4441
4442 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4443 exists. */
4444 delete_step_resume_breakpoint (ecs->event_thread);
4445
4446 ecs->event_thread->control.stop_step = 1;
4447 print_end_stepping_range_reason ();
4448 stop_stepping (ecs);
4449 }
4450 return;
4451
4452 case BPSTAT_WHAT_SINGLE:
4453 if (debug_infrun)
4454 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4455 ecs->event_thread->stepping_over_breakpoint = 1;
4456 /* Still need to check other stuff, at least the case where we
4457 are stepping and step out of the right range. */
4458 break;
4459
4460 case BPSTAT_WHAT_STEP_RESUME:
4461 if (debug_infrun)
4462 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4463
4464 delete_step_resume_breakpoint (ecs->event_thread);
4465 if (ecs->event_thread->control.proceed_to_finish
4466 && execution_direction == EXEC_REVERSE)
4467 {
4468 struct thread_info *tp = ecs->event_thread;
4469
4470 /* We are finishing a function in reverse, and just hit the
4471 step-resume breakpoint at the start address of the
4472 function, and we're almost there -- just need to back up
4473 by one more single-step, which should take us back to the
4474 function call. */
4475 tp->control.step_range_start = tp->control.step_range_end = 1;
4476 keep_going (ecs);
4477 return;
4478 }
4479 fill_in_stop_func (gdbarch, ecs);
4480 if (stop_pc == ecs->stop_func_start
4481 && execution_direction == EXEC_REVERSE)
4482 {
4483 /* We are stepping over a function call in reverse, and just
4484 hit the step-resume breakpoint at the start address of
4485 the function. Go back to single-stepping, which should
4486 take us back to the function call. */
4487 ecs->event_thread->stepping_over_breakpoint = 1;
4488 keep_going (ecs);
4489 return;
4490 }
4491 break;
4492
4493 case BPSTAT_WHAT_STOP_NOISY:
4494 if (debug_infrun)
4495 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4496 stop_print_frame = 1;
4497
4498 /* Assume the thread stopped for a breakpoint. We'll still check
4499 whether a/the breakpoint is there when the thread is next
4500 resumed. */
4501 ecs->event_thread->stepping_over_breakpoint = 1;
4502
4503 stop_stepping (ecs);
4504 return;
4505
4506 case BPSTAT_WHAT_STOP_SILENT:
4507 if (debug_infrun)
4508 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4509 stop_print_frame = 0;
4510
4511 /* Assume the thread stopped for a breakpoint. We'll still check
4512 whether a/the breakpoint is there when the thread is next
4513 resumed. */
4514 ecs->event_thread->stepping_over_breakpoint = 1;
4515 stop_stepping (ecs);
4516 return;
4517
4518 case BPSTAT_WHAT_HP_STEP_RESUME:
4519 if (debug_infrun)
4520 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4521
4522 delete_step_resume_breakpoint (ecs->event_thread);
4523 if (ecs->event_thread->step_after_step_resume_breakpoint)
4524 {
4525 /* Back when the step-resume breakpoint was inserted, we
4526 were trying to single-step off a breakpoint. Go back to
4527 doing that. */
4528 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4529 ecs->event_thread->stepping_over_breakpoint = 1;
4530 keep_going (ecs);
4531 return;
4532 }
4533 break;
4534
4535 case BPSTAT_WHAT_KEEP_CHECKING:
4536 break;
4537 }
4538
4539 /* We come here if we hit a breakpoint but should not stop for it.
4540 Possibly we also were stepping and should stop for that. So fall
4541 through and test for stepping. But, if not stepping, do not
4542 stop. */
4543
4544 /* In all-stop mode, if we're currently stepping but have stopped in
4545 some other thread, we need to switch back to the stepped thread. */
4546 if (switch_back_to_stepped_thread (ecs))
4547 return;
4548
4549 if (ecs->event_thread->control.step_resume_breakpoint)
4550 {
4551 if (debug_infrun)
4552 fprintf_unfiltered (gdb_stdlog,
4553 "infrun: step-resume breakpoint is inserted\n");
4554
4555 /* Having a step-resume breakpoint overrides anything
4556 else having to do with stepping commands until
4557 that breakpoint is reached. */
4558 keep_going (ecs);
4559 return;
4560 }
4561
4562 if (ecs->event_thread->control.step_range_end == 0)
4563 {
4564 if (debug_infrun)
4565 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4566 /* Likewise if we aren't even stepping. */
4567 keep_going (ecs);
4568 return;
4569 }
4570
4571 /* Re-fetch current thread's frame in case the code above caused
4572 the frame cache to be re-initialized, making our FRAME variable
4573 a dangling pointer. */
4574 frame = get_current_frame ();
4575 gdbarch = get_frame_arch (frame);
4576 fill_in_stop_func (gdbarch, ecs);
4577
4578 /* If stepping through a line, keep going if still within it.
4579
4580 Note that step_range_end is the address of the first instruction
4581 beyond the step range, and NOT the address of the last instruction
4582 within it!
4583
4584 Note also that during reverse execution, we may be stepping
4585 through a function epilogue and therefore must detect when
4586 the current-frame changes in the middle of a line. */
4587
4588 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4589 && (execution_direction != EXEC_REVERSE
4590 || frame_id_eq (get_frame_id (frame),
4591 ecs->event_thread->control.step_frame_id)))
4592 {
4593 if (debug_infrun)
4594 fprintf_unfiltered
4595 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4596 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4597 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4598
4599 /* Tentatively re-enable range stepping; `resume' disables it if
4600 necessary (e.g., if we're stepping over a breakpoint or we
4601 have software watchpoints). */
4602 ecs->event_thread->control.may_range_step = 1;
4603
4604 /* When stepping backward, stop at beginning of line range
4605 (unless it's the function entry point, in which case
4606 keep going back to the call point). */
4607 if (stop_pc == ecs->event_thread->control.step_range_start
4608 && stop_pc != ecs->stop_func_start
4609 && execution_direction == EXEC_REVERSE)
4610 {
4611 ecs->event_thread->control.stop_step = 1;
4612 print_end_stepping_range_reason ();
4613 stop_stepping (ecs);
4614 }
4615 else
4616 keep_going (ecs);
4617
4618 return;
4619 }
4620
4621 /* We stepped out of the stepping range. */
4622
4623 /* If we are stepping at the source level and entered the runtime
4624 loader dynamic symbol resolution code...
4625
4626 EXEC_FORWARD: we keep on single stepping until we exit the run
4627 time loader code and reach the callee's address.
4628
4629 EXEC_REVERSE: we've already executed the callee (backward), and
4630 the runtime loader code is handled just like any other
4631 undebuggable function call. Now we need only keep stepping
4632 backward through the trampoline code, and that's handled further
4633 down, so there is nothing for us to do here. */
4634
4635 if (execution_direction != EXEC_REVERSE
4636 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4637 && in_solib_dynsym_resolve_code (stop_pc))
4638 {
4639 CORE_ADDR pc_after_resolver =
4640 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4641
4642 if (debug_infrun)
4643 fprintf_unfiltered (gdb_stdlog,
4644 "infrun: stepped into dynsym resolve code\n");
4645
4646 if (pc_after_resolver)
4647 {
4648 /* Set up a step-resume breakpoint at the address
4649 indicated by SKIP_SOLIB_RESOLVER. */
4650 struct symtab_and_line sr_sal;
4651
4652 init_sal (&sr_sal);
4653 sr_sal.pc = pc_after_resolver;
4654 sr_sal.pspace = get_frame_program_space (frame);
4655
4656 insert_step_resume_breakpoint_at_sal (gdbarch,
4657 sr_sal, null_frame_id);
4658 }
4659
4660 keep_going (ecs);
4661 return;
4662 }
4663
4664 if (ecs->event_thread->control.step_range_end != 1
4665 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4666 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4667 && get_frame_type (frame) == SIGTRAMP_FRAME)
4668 {
4669 if (debug_infrun)
4670 fprintf_unfiltered (gdb_stdlog,
4671 "infrun: stepped into signal trampoline\n");
4672 /* The inferior, while doing a "step" or "next", has ended up in
4673 a signal trampoline (either by a signal being delivered or by
4674 the signal handler returning). Just single-step until the
4675 inferior leaves the trampoline (either by calling the handler
4676 or returning). */
4677 keep_going (ecs);
4678 return;
4679 }
4680
4681 /* If we're in the return path from a shared library trampoline,
4682 we want to proceed through the trampoline when stepping. */
4683 /* macro/2012-04-25: This needs to come before the subroutine
4684 call check below as on some targets return trampolines look
4685 like subroutine calls (MIPS16 return thunks). */
4686 if (gdbarch_in_solib_return_trampoline (gdbarch,
4687 stop_pc, ecs->stop_func_name)
4688 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4689 {
4690 /* Determine where this trampoline returns. */
4691 CORE_ADDR real_stop_pc;
4692
4693 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4694
4695 if (debug_infrun)
4696 fprintf_unfiltered (gdb_stdlog,
4697 "infrun: stepped into solib return tramp\n");
4698
4699 /* Only proceed through if we know where it's going. */
4700 if (real_stop_pc)
4701 {
4702 /* And put the step-breakpoint there and go until there. */
4703 struct symtab_and_line sr_sal;
4704
4705 init_sal (&sr_sal); /* initialize to zeroes */
4706 sr_sal.pc = real_stop_pc;
4707 sr_sal.section = find_pc_overlay (sr_sal.pc);
4708 sr_sal.pspace = get_frame_program_space (frame);
4709
4710 /* Do not specify what the fp should be when we stop since
4711 on some machines the prologue is where the new fp value
4712 is established. */
4713 insert_step_resume_breakpoint_at_sal (gdbarch,
4714 sr_sal, null_frame_id);
4715
4716 /* Restart without fiddling with the step ranges or
4717 other state. */
4718 keep_going (ecs);
4719 return;
4720 }
4721 }
4722
4723 /* Check for subroutine calls. The check for the current frame
4724 equalling the step ID is not necessary - the check of the
4725 previous frame's ID is sufficient - but it is a common case and
4726 cheaper than checking the previous frame's ID.
4727
4728 NOTE: frame_id_eq will never report two invalid frame IDs as
4729 being equal, so to get into this block, both the current and
4730 previous frame must have valid frame IDs. */
4731 /* The outer_frame_id check is a heuristic to detect stepping
4732 through startup code. If we step over an instruction which
4733 sets the stack pointer from an invalid value to a valid value,
4734 we may detect that as a subroutine call from the mythical
4735 "outermost" function. This could be fixed by marking
4736 outermost frames as !stack_p,code_p,special_p. Then the
4737 initial outermost frame, before sp was valid, would
4738 have code_addr == &_start. See the comment in frame_id_eq
4739 for more. */
4740 if (!frame_id_eq (get_stack_frame_id (frame),
4741 ecs->event_thread->control.step_stack_frame_id)
4742 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4743 ecs->event_thread->control.step_stack_frame_id)
4744 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4745 outer_frame_id)
4746 || step_start_function != find_pc_function (stop_pc))))
4747 {
4748 CORE_ADDR real_stop_pc;
4749
4750 if (debug_infrun)
4751 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4752
4753 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4754 || ((ecs->event_thread->control.step_range_end == 1)
4755 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4756 ecs->stop_func_start)))
4757 {
4758 /* I presume that step_over_calls is only 0 when we're
4759 supposed to be stepping at the assembly language level
4760 ("stepi"). Just stop. */
4761 /* Also, maybe we just did a "nexti" inside a prologue, so we
4762 thought it was a subroutine call but it was not. Stop as
4763 well. FENN */
4764 /* And this works the same backward as frontward. MVS */
4765 ecs->event_thread->control.stop_step = 1;
4766 print_end_stepping_range_reason ();
4767 stop_stepping (ecs);
4768 return;
4769 }
4770
4771 /* Reverse stepping through solib trampolines. */
4772
4773 if (execution_direction == EXEC_REVERSE
4774 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4775 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4776 || (ecs->stop_func_start == 0
4777 && in_solib_dynsym_resolve_code (stop_pc))))
4778 {
4779 /* Any solib trampoline code can be handled in reverse
4780 by simply continuing to single-step. We have already
4781 executed the solib function (backwards), and a few
4782 steps will take us back through the trampoline to the
4783 caller. */
4784 keep_going (ecs);
4785 return;
4786 }
4787
4788 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4789 {
4790 /* We're doing a "next".
4791
4792 Normal (forward) execution: set a breakpoint at the
4793 callee's return address (the address at which the caller
4794 will resume).
4795
4796 Reverse (backward) execution. set the step-resume
4797 breakpoint at the start of the function that we just
4798 stepped into (backwards), and continue to there. When we
4799 get there, we'll need to single-step back to the caller. */
4800
4801 if (execution_direction == EXEC_REVERSE)
4802 {
4803 /* If we're already at the start of the function, we've either
4804 just stepped backward into a single instruction function,
4805 or stepped back out of a signal handler to the first instruction
4806 of the function. Just keep going, which will single-step back
4807 to the caller. */
4808 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
4809 {
4810 struct symtab_and_line sr_sal;
4811
4812 /* Normal function call return (static or dynamic). */
4813 init_sal (&sr_sal);
4814 sr_sal.pc = ecs->stop_func_start;
4815 sr_sal.pspace = get_frame_program_space (frame);
4816 insert_step_resume_breakpoint_at_sal (gdbarch,
4817 sr_sal, null_frame_id);
4818 }
4819 }
4820 else
4821 insert_step_resume_breakpoint_at_caller (frame);
4822
4823 keep_going (ecs);
4824 return;
4825 }
4826
4827 /* If we are in a function call trampoline (a stub between the
4828 calling routine and the real function), locate the real
4829 function. That's what tells us (a) whether we want to step
4830 into it at all, and (b) what prologue we want to run to the
4831 end of, if we do step into it. */
4832 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4833 if (real_stop_pc == 0)
4834 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4835 if (real_stop_pc != 0)
4836 ecs->stop_func_start = real_stop_pc;
4837
4838 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4839 {
4840 struct symtab_and_line sr_sal;
4841
4842 init_sal (&sr_sal);
4843 sr_sal.pc = ecs->stop_func_start;
4844 sr_sal.pspace = get_frame_program_space (frame);
4845
4846 insert_step_resume_breakpoint_at_sal (gdbarch,
4847 sr_sal, null_frame_id);
4848 keep_going (ecs);
4849 return;
4850 }
4851
4852 /* If we have line number information for the function we are
4853 thinking of stepping into and the function isn't on the skip
4854 list, step into it.
4855
4856 If there are several symtabs at that PC (e.g. with include
4857 files), we just want to know whether *any* of them have line
4858 numbers. find_pc_line handles this. */
4859 {
4860 struct symtab_and_line tmp_sal;
4861
4862 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4863 if (tmp_sal.line != 0
4864 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4865 &tmp_sal))
4866 {
4867 if (execution_direction == EXEC_REVERSE)
4868 handle_step_into_function_backward (gdbarch, ecs);
4869 else
4870 handle_step_into_function (gdbarch, ecs);
4871 return;
4872 }
4873 }
4874
4875 /* If we have no line number and the step-stop-if-no-debug is
4876 set, we stop the step so that the user has a chance to switch
4877 in assembly mode. */
4878 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4879 && step_stop_if_no_debug)
4880 {
4881 ecs->event_thread->control.stop_step = 1;
4882 print_end_stepping_range_reason ();
4883 stop_stepping (ecs);
4884 return;
4885 }
4886
4887 if (execution_direction == EXEC_REVERSE)
4888 {
4889 /* If we're already at the start of the function, we've either just
4890 stepped backward into a single instruction function without line
4891 number info, or stepped back out of a signal handler to the first
4892 instruction of the function without line number info. Just keep
4893 going, which will single-step back to the caller. */
4894 if (ecs->stop_func_start != stop_pc)
4895 {
4896 /* Set a breakpoint at callee's start address.
4897 From there we can step once and be back in the caller. */
4898 struct symtab_and_line sr_sal;
4899
4900 init_sal (&sr_sal);
4901 sr_sal.pc = ecs->stop_func_start;
4902 sr_sal.pspace = get_frame_program_space (frame);
4903 insert_step_resume_breakpoint_at_sal (gdbarch,
4904 sr_sal, null_frame_id);
4905 }
4906 }
4907 else
4908 /* Set a breakpoint at callee's return address (the address
4909 at which the caller will resume). */
4910 insert_step_resume_breakpoint_at_caller (frame);
4911
4912 keep_going (ecs);
4913 return;
4914 }
4915
4916 /* Reverse stepping through solib trampolines. */
4917
4918 if (execution_direction == EXEC_REVERSE
4919 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4920 {
4921 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4922 || (ecs->stop_func_start == 0
4923 && in_solib_dynsym_resolve_code (stop_pc)))
4924 {
4925 /* Any solib trampoline code can be handled in reverse
4926 by simply continuing to single-step. We have already
4927 executed the solib function (backwards), and a few
4928 steps will take us back through the trampoline to the
4929 caller. */
4930 keep_going (ecs);
4931 return;
4932 }
4933 else if (in_solib_dynsym_resolve_code (stop_pc))
4934 {
4935 /* Stepped backward into the solib dynsym resolver.
4936 Set a breakpoint at its start and continue, then
4937 one more step will take us out. */
4938 struct symtab_and_line sr_sal;
4939
4940 init_sal (&sr_sal);
4941 sr_sal.pc = ecs->stop_func_start;
4942 sr_sal.pspace = get_frame_program_space (frame);
4943 insert_step_resume_breakpoint_at_sal (gdbarch,
4944 sr_sal, null_frame_id);
4945 keep_going (ecs);
4946 return;
4947 }
4948 }
4949
4950 stop_pc_sal = find_pc_line (stop_pc, 0);
4951
4952 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4953 the trampoline processing logic, however, there are some trampolines
4954 that have no names, so we should do trampoline handling first. */
4955 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4956 && ecs->stop_func_name == NULL
4957 && stop_pc_sal.line == 0)
4958 {
4959 if (debug_infrun)
4960 fprintf_unfiltered (gdb_stdlog,
4961 "infrun: stepped into undebuggable function\n");
4962
4963 /* The inferior just stepped into, or returned to, an
4964 undebuggable function (where there is no debugging information
4965 and no line number corresponding to the address where the
4966 inferior stopped). Since we want to skip this kind of code,
4967 we keep going until the inferior returns from this
4968 function - unless the user has asked us not to (via
4969 set step-mode) or we no longer know how to get back
4970 to the call site. */
4971 if (step_stop_if_no_debug
4972 || !frame_id_p (frame_unwind_caller_id (frame)))
4973 {
4974 /* If we have no line number and the step-stop-if-no-debug
4975 is set, we stop the step so that the user has a chance to
4976 switch to assembly mode. */
4977 ecs->event_thread->control.stop_step = 1;
4978 print_end_stepping_range_reason ();
4979 stop_stepping (ecs);
4980 return;
4981 }
4982 else
4983 {
4984 /* Set a breakpoint at callee's return address (the address
4985 at which the caller will resume). */
4986 insert_step_resume_breakpoint_at_caller (frame);
4987 keep_going (ecs);
4988 return;
4989 }
4990 }
4991
4992 if (ecs->event_thread->control.step_range_end == 1)
4993 {
4994 /* It is stepi or nexti. We always want to stop stepping after
4995 one instruction. */
4996 if (debug_infrun)
4997 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4998 ecs->event_thread->control.stop_step = 1;
4999 print_end_stepping_range_reason ();
5000 stop_stepping (ecs);
5001 return;
5002 }
5003
5004 if (stop_pc_sal.line == 0)
5005 {
5006 /* We have no line number information. That means to stop
5007 stepping (does this always happen right after one instruction,
5008 when we do "s" in a function with no line numbers,
5009 or can this happen as a result of a return or longjmp?). */
5010 if (debug_infrun)
5011 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5012 ecs->event_thread->control.stop_step = 1;
5013 print_end_stepping_range_reason ();
5014 stop_stepping (ecs);
5015 return;
5016 }
5017
5018 /* Look for "calls" to inlined functions, part one. If the inline
5019 frame machinery detected some skipped call sites, we have entered
5020 a new inline function. */
5021
5022 if (frame_id_eq (get_frame_id (get_current_frame ()),
5023 ecs->event_thread->control.step_frame_id)
5024 && inline_skipped_frames (ecs->ptid))
5025 {
5026 struct symtab_and_line call_sal;
5027
5028 if (debug_infrun)
5029 fprintf_unfiltered (gdb_stdlog,
5030 "infrun: stepped into inlined function\n");
5031
5032 find_frame_sal (get_current_frame (), &call_sal);
5033
5034 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5035 {
5036 /* For "step", we're going to stop. But if the call site
5037 for this inlined function is on the same source line as
5038 we were previously stepping, go down into the function
5039 first. Otherwise stop at the call site. */
5040
5041 if (call_sal.line == ecs->event_thread->current_line
5042 && call_sal.symtab == ecs->event_thread->current_symtab)
5043 step_into_inline_frame (ecs->ptid);
5044
5045 ecs->event_thread->control.stop_step = 1;
5046 print_end_stepping_range_reason ();
5047 stop_stepping (ecs);
5048 return;
5049 }
5050 else
5051 {
5052 /* For "next", we should stop at the call site if it is on a
5053 different source line. Otherwise continue through the
5054 inlined function. */
5055 if (call_sal.line == ecs->event_thread->current_line
5056 && call_sal.symtab == ecs->event_thread->current_symtab)
5057 keep_going (ecs);
5058 else
5059 {
5060 ecs->event_thread->control.stop_step = 1;
5061 print_end_stepping_range_reason ();
5062 stop_stepping (ecs);
5063 }
5064 return;
5065 }
5066 }
5067
5068 /* Look for "calls" to inlined functions, part two. If we are still
5069 in the same real function we were stepping through, but we have
5070 to go further up to find the exact frame ID, we are stepping
5071 through a more inlined call beyond its call site. */
5072
5073 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5074 && !frame_id_eq (get_frame_id (get_current_frame ()),
5075 ecs->event_thread->control.step_frame_id)
5076 && stepped_in_from (get_current_frame (),
5077 ecs->event_thread->control.step_frame_id))
5078 {
5079 if (debug_infrun)
5080 fprintf_unfiltered (gdb_stdlog,
5081 "infrun: stepping through inlined function\n");
5082
5083 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5084 keep_going (ecs);
5085 else
5086 {
5087 ecs->event_thread->control.stop_step = 1;
5088 print_end_stepping_range_reason ();
5089 stop_stepping (ecs);
5090 }
5091 return;
5092 }
5093
5094 if ((stop_pc == stop_pc_sal.pc)
5095 && (ecs->event_thread->current_line != stop_pc_sal.line
5096 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5097 {
5098 /* We are at the start of a different line. So stop. Note that
5099 we don't stop if we step into the middle of a different line.
5100 That is said to make things like for (;;) statements work
5101 better. */
5102 if (debug_infrun)
5103 fprintf_unfiltered (gdb_stdlog,
5104 "infrun: stepped to a different line\n");
5105 ecs->event_thread->control.stop_step = 1;
5106 print_end_stepping_range_reason ();
5107 stop_stepping (ecs);
5108 return;
5109 }
5110
5111 /* We aren't done stepping.
5112
5113 Optimize by setting the stepping range to the line.
5114 (We might not be in the original line, but if we entered a
5115 new line in mid-statement, we continue stepping. This makes
5116 things like for(;;) statements work better.) */
5117
5118 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5119 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5120 ecs->event_thread->control.may_range_step = 1;
5121 set_step_info (frame, stop_pc_sal);
5122
5123 if (debug_infrun)
5124 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5125 keep_going (ecs);
5126 }
5127
5128 /* In all-stop mode, if we're currently stepping but have stopped in
5129 some other thread, we may need to switch back to the stepped
5130 thread. Returns true if we set the inferior running, false if we left
5131 it stopped (and the event needs further processing). */
5132
5133 static int
5134 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5135 {
5136 if (!non_stop)
5137 {
5138 struct thread_info *tp;
5139 struct thread_info *stepping_thread;
5140
5141 /* If any thread is blocked on some internal breakpoint, and we
5142 simply need to step over that breakpoint to get it going
5143 again, do that first. */
5144
5145 /* However, if we see an event for the stepping thread, then we
5146 know all other threads have been moved past their breakpoints
5147 already. Let the caller check whether the step is finished,
5148 etc., before deciding to move it past a breakpoint. */
5149 if (ecs->event_thread->control.step_range_end != 0)
5150 return 0;
5151
5152 /* Check if the current thread is blocked on an incomplete
5153 step-over, interrupted by a random signal. */
5154 if (ecs->event_thread->control.trap_expected
5155 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5156 {
5157 if (debug_infrun)
5158 {
5159 fprintf_unfiltered (gdb_stdlog,
5160 "infrun: need to finish step-over of [%s]\n",
5161 target_pid_to_str (ecs->event_thread->ptid));
5162 }
5163 keep_going (ecs);
5164 return 1;
5165 }
5166
5167 /* Check if the current thread is blocked by a single-step
5168 breakpoint of another thread. */
5169 if (ecs->hit_singlestep_breakpoint)
5170 {
5171 if (debug_infrun)
5172 {
5173 fprintf_unfiltered (gdb_stdlog,
5174 "infrun: need to step [%s] over single-step "
5175 "breakpoint\n",
5176 target_pid_to_str (ecs->ptid));
5177 }
5178 keep_going (ecs);
5179 return 1;
5180 }
5181
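      /* Find some thread other than the event thread that is in the
	 middle of a step/next; the callback skips the thread passed
	 as DATA.  */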
5182 stepping_thread
5183 = iterate_over_threads (currently_stepping_or_nexting_callback,
5184 ecs->event_thread);
5185
5186 /* Check whether any thread other than the stepping thread needs
5187 to start a step-over. Do that before actually
5188 proceeding with step/next/etc. */
5189 tp = find_thread_needs_step_over (stepping_thread != NULL,
5190 stepping_thread);
5191 if (tp != NULL)
5192 {
5193 if (debug_infrun)
5194 {
5195 fprintf_unfiltered (gdb_stdlog,
5196 "infrun: need to step-over [%s]\n",
5197 target_pid_to_str (tp->ptid));
5198 }
5199
5200 gdb_assert (!tp->control.trap_expected);
5201 gdb_assert (tp->control.step_range_end == 0);
5202
5203 /* We no longer expect a trap in the current thread. Clear
5204 the trap_expected flag before switching. This is what
5205 keep_going would do as well, if we called it. */
5206 ecs->event_thread->control.trap_expected = 0;
5207
5208 ecs->ptid = tp->ptid;
5209 ecs->event_thread = tp;
5210 switch_to_thread (ecs->ptid);
5211 keep_going (ecs);
5212 return 1;
5213 }
5214
5215 tp = stepping_thread;
5216 if (tp != NULL)
5217 {
5218 struct frame_info *frame;
5219 struct gdbarch *gdbarch;
5220
5221 /* If the stepping thread exited, then don't try to switch
5222 back and resume it, which could fail in several different
5223 ways depending on the target. Instead, just keep going.
5224
5225 We can find a stepping dead thread in the thread list in
5226 two cases:
5227
5228 - The target supports thread exit events, and when the
5229 target tries to delete the thread from the thread list,
5230 inferior_ptid pointed at the exiting thread. In such
5231 case, calling delete_thread does not really remove the
5232 thread from the list; instead, the thread is left listed,
5233 with 'exited' state.
5234
5235 - The target's debug interface does not support thread
5236 exit events, and so we have no idea whatsoever if the
5237 previously stepping thread is still alive. For that
5238 reason, we need to synchronously query the target
5239 now. */
5240 if (is_exited (tp->ptid)
5241 || !target_thread_alive (tp->ptid))
5242 {
5243 if (debug_infrun)
5244 fprintf_unfiltered (gdb_stdlog,
5245 "infrun: not switching back to "
5246 "stepped thread, it has vanished\n");
5247
5248 delete_thread (tp->ptid);
5249 keep_going (ecs);
5250 return 1;
5251 }
5252
5253 /* Otherwise, we no longer expect a trap in the current thread.
5254 Clear the trap_expected flag before switching back -- this is
5255 what keep_going would do as well, if we called it. */
5256 ecs->event_thread->control.trap_expected = 0;
5257
5258 if (debug_infrun)
5259 fprintf_unfiltered (gdb_stdlog,
5260 "infrun: switching back to stepped thread\n");
5261
5262 ecs->event_thread = tp;
5263 ecs->ptid = tp->ptid;
5264 context_switch (ecs->ptid);
5265
5266 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5267 frame = get_current_frame ();
5268 gdbarch = get_frame_arch (frame);
5269
5270 /* If the PC of the thread we were trying to single-step has
5271 changed, then that thread has trapped or been signaled,
5272 but the event has not been reported to GDB yet. Re-poll
5273 the target looking for this particular thread's event
5274 (i.e. temporarily enable schedlock) by:
5275
5276 - setting a break at the current PC
5277 - resuming that particular thread, only (by setting
5278 trap expected)
5279
5280 This prevents us continuously moving the single-step
5281 breakpoint forward, one instruction at a time,
5282 overstepping. */
5283
5284 if (gdbarch_software_single_step_p (gdbarch)
5285 && stop_pc != tp->prev_pc)
5286 {
5287 if (debug_infrun)
5288 fprintf_unfiltered (gdb_stdlog,
5289 "infrun: expected thread advanced also\n");
5290
5291 insert_single_step_breakpoint (get_frame_arch (frame),
5292 get_frame_address_space (frame),
5293 stop_pc);
5294 singlestep_breakpoints_inserted_p = 1;
5295 ecs->event_thread->control.trap_expected = 1;
5296 singlestep_ptid = inferior_ptid;
5297 singlestep_pc = stop_pc;
5298
5299 resume (0, GDB_SIGNAL_0);
5300 prepare_to_wait (ecs);
5301 }
5302 else
5303 {
5304 if (debug_infrun)
5305 fprintf_unfiltered (gdb_stdlog,
5306 "infrun: expected thread still "
5307 "hasn't advanced\n");
5308 keep_going (ecs);
5309 }
5310
5311 return 1;
5312 }
5313 }
5314 return 0;
5315 }
5316
5317 /* Is thread TP in the middle of single-stepping? */
5318
5319 static int
5320 currently_stepping (struct thread_info *tp)
5321 {
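  /* A thread counts as stepping if it is inside a stepping range with
     no step-resume breakpoint pending, if it is expecting the trap of
     an in-progress breakpoint step-over, or if the breakpoint
     machinery requests single-stepping (bpstat_should_step).  */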
5322 return ((tp->control.step_range_end
5323 && tp->control.step_resume_breakpoint == NULL)
5324 || tp->control.trap_expected
5325 || bpstat_should_step ());
5326 }
5327
5328 /* Returns true if any thread *but* the one passed in "data" is in the
5329 middle of stepping or of handling a "next". */
5330
5331 static int
5332 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5333 {
5334 if (tp == data)
5335 return 0;
5336
5337 return (tp->control.step_range_end
5338 || tp->control.trap_expected);
5339 }
5340
5341 /* Inferior has stepped into a subroutine call with source code that
5342 we should not step over. Do step to the first line of code in
5343 it. */
5344
5345 static void
5346 handle_step_into_function (struct gdbarch *gdbarch,
5347 struct execution_control_state *ecs)
5348 {
5349 struct symtab *s;
5350 struct symtab_and_line stop_func_sal, sr_sal;
5351
5352 fill_in_stop_func (gdbarch, ecs);
5353
5354 s = find_pc_symtab (stop_pc);
5355 if (s && s->language != language_asm)
5356 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5357 ecs->stop_func_start);
5358
5359 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5360 /* Use the step_resume_break to step until the end of the prologue,
5361 even if that involves jumps (as it seems to on the vax under
5362 4.2). */
5363 /* If the prologue ends in the middle of a source line, continue to
5364 the end of that source line (if it is still within the function).
5365 Otherwise, just go to end of prologue. */
5366 if (stop_func_sal.end
5367 && stop_func_sal.pc != ecs->stop_func_start
5368 && stop_func_sal.end < ecs->stop_func_end)
5369 ecs->stop_func_start = stop_func_sal.end;
5370
5371 /* Architectures which require breakpoint adjustment might not be able
5372 to place a breakpoint at the computed address. If so, the test
5373 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5374 ecs->stop_func_start to an address at which a breakpoint may be
5375 legitimately placed.
5376
5377 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5378 made, GDB will enter an infinite loop when stepping through
5379 optimized code consisting of VLIW instructions which contain
5380 subinstructions corresponding to different source lines. On
5381 FR-V, it's not permitted to place a breakpoint on any but the
5382 first subinstruction of a VLIW instruction. When a breakpoint is
5383 set, GDB will adjust the breakpoint address to the beginning of
5384 the VLIW instruction. Thus, we need to make the corresponding
5385 adjustment here when computing the stop address. */
5386
5387 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5388 {
5389 ecs->stop_func_start
5390 = gdbarch_adjust_breakpoint_address (gdbarch,
5391 ecs->stop_func_start);
5392 }
5393
5394 if (ecs->stop_func_start == stop_pc)
5395 {
5396 /* We are already there: stop now. */
5397 ecs->event_thread->control.stop_step = 1;
5398 print_end_stepping_range_reason ();
5399 stop_stepping (ecs);
5400 return;
5401 }
5402 else
5403 {
5404 /* Put the step-breakpoint there and go until there. */
5405 init_sal (&sr_sal); /* initialize to zeroes */
5406 sr_sal.pc = ecs->stop_func_start;
5407 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5408 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5409
5410 /* Do not specify what the fp should be when we stop since on
5411 some machines the prologue is where the new fp value is
5412 established. */
5413 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5414
5415 /* And make sure stepping stops right away then. */
5416 ecs->event_thread->control.step_range_end
5417 = ecs->event_thread->control.step_range_start;
5418 }
5419 keep_going (ecs);
5420 }
5421
5422 /* Inferior has stepped backward into a subroutine call with source
5423 code that we should not step over. Step to the beginning of the
5424 last line of code in it. */
5425
5426 static void
5427 handle_step_into_function_backward (struct gdbarch *gdbarch,
5428 struct execution_control_state *ecs)
5429 {
5430 struct symtab *s;
5431 struct symtab_and_line stop_func_sal;
5432
5433 fill_in_stop_func (gdbarch, ecs);
5434
5435 s = find_pc_symtab (stop_pc);
5436 if (s && s->language != language_asm)
5437 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5438 ecs->stop_func_start);
5439
5440 stop_func_sal = find_pc_line (stop_pc, 0);
5441
5442 /* OK, we're just going to keep stepping here. */
5443 if (stop_func_sal.pc == stop_pc)
5444 {
5445 /* We're there already. Just stop stepping now. */
5446 ecs->event_thread->control.stop_step = 1;
5447 print_end_stepping_range_reason ();
5448 stop_stepping (ecs);
5449 }
5450 else
5451 {
5452 /* Else just reset the step range and keep going.
5453 No step-resume breakpoint, they don't work for
5454 epilogues, which can have multiple entry paths. */
5455 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5456 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5457 keep_going (ecs);
5458 }
5459 return;
5460 }
5461
5462 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5463 This is used both to step over functions and to skip over code. */
5464
5465 static void
5466 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5467 struct symtab_and_line sr_sal,
5468 struct frame_id sr_id,
5469 enum bptype sr_type)
5470 {
5471 /* There should never be more than one step-resume or longjmp-resume
5472 breakpoint per thread, so we should never be setting a new
5473 step_resume_breakpoint when one is already active. */
5474 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5475 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5476
5477 if (debug_infrun)
5478 fprintf_unfiltered (gdb_stdlog,
5479 "infrun: inserting step-resume breakpoint at %s\n",
5480 paddress (gdbarch, sr_sal.pc));
5481
5482 inferior_thread ()->control.step_resume_breakpoint
5483 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5484 }
5485
5486 void
5487 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5488 struct symtab_and_line sr_sal,
5489 struct frame_id sr_id)
5490 {
5491 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5492 sr_sal, sr_id,
5493 bp_step_resume);
5494 }
5495
5496 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5497 This is used to skip a potential signal handler.
5498
5499 This is called with the interrupted function's frame. The signal
5500 handler, when it returns, will resume the interrupted function at
5501 RETURN_FRAME.pc. */
5502
5503 static void
5504 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5505 {
5506 struct symtab_and_line sr_sal;
5507 struct gdbarch *gdbarch;
5508
5509 gdb_assert (return_frame != NULL);
5510 init_sal (&sr_sal); /* initialize to zeros */
5511
5512 gdbarch = get_frame_arch (return_frame);
5513 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5514 sr_sal.section = find_pc_overlay (sr_sal.pc);
5515 sr_sal.pspace = get_frame_program_space (return_frame);
5516
5517 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5518 get_stack_frame_id (return_frame),
5519 bp_hp_step_resume);
5520 }
5521
5522 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5523 is used to skip a function after stepping into it (for "next" or if
5524 the called function has no debugging information).
5525
5526 The current function has almost always been reached by single
5527 stepping a call or return instruction. NEXT_FRAME belongs to the
5528 current function, and the breakpoint will be set at the caller's
5529 resume address.
5530
5531 This is a separate function rather than reusing
5532 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5533 get_prev_frame, which may stop prematurely (see the implementation
5534 of frame_unwind_caller_id for an example). */
5535
5536 static void
5537 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5538 {
5539 struct symtab_and_line sr_sal;
5540 struct gdbarch *gdbarch;
5541
5542 /* We shouldn't have gotten here if we don't know where the call site
5543 is. */
5544 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5545
5546 init_sal (&sr_sal); /* initialize to zeros */
5547
5548 gdbarch = frame_unwind_caller_arch (next_frame);
5549 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5550 frame_unwind_caller_pc (next_frame));
5551 sr_sal.section = find_pc_overlay (sr_sal.pc);
5552 sr_sal.pspace = frame_unwind_program_space (next_frame);
5553
5554 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5555 frame_unwind_caller_id (next_frame));
5556 }
5557
5558 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5559 new breakpoint at the target of a jmp_buf. The handling of
5560 longjmp-resume uses the same mechanisms used for handling
5561 "step-resume" breakpoints. */
5562
5563 static void
5564 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5565 {
5566 /* There should never be more than one longjmp-resume breakpoint per
5567 thread, so we should never be setting a new
5568 longjmp_resume_breakpoint when one is already active. */
5569 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5570
5571 if (debug_infrun)
5572 fprintf_unfiltered (gdb_stdlog,
5573 "infrun: inserting longjmp-resume breakpoint at %s\n",
5574 paddress (gdbarch, pc));
5575
5576 inferior_thread ()->control.exception_resume_breakpoint =
5577 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5578 }
5579
5580 /* Insert an exception resume breakpoint. TP is the thread throwing
5581 the exception. The block B is the block of the unwinder debug hook
5582 function. FRAME is the frame corresponding to the call to this
5583 function. SYM is the symbol of the function argument holding the
5584 target PC of the exception. */
5585
5586 static void
5587 insert_exception_resume_breakpoint (struct thread_info *tp,
5588 struct block *b,
5589 struct frame_info *frame,
5590 struct symbol *sym)
5591 {
5592 volatile struct gdb_exception e;
5593
5594 /* We want to ignore errors here. */
5595 TRY_CATCH (e, RETURN_MASK_ERROR)
5596 {
5597 struct symbol *vsym;
5598 struct value *value;
5599 CORE_ADDR handler;
5600 struct breakpoint *bp;
5601
5602 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5603 value = read_var_value (vsym, frame);
5604 /* If the value was optimized out, revert to the old behavior. */
5605 if (! value_optimized_out (value))
5606 {
5607 handler = value_as_address (value);
5608
5609 if (debug_infrun)
5610 fprintf_unfiltered (gdb_stdlog,
5611 "infrun: exception resume at %lx\n",
5612 (unsigned long) handler);
5613
5614 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5615 handler, bp_exception_resume);
5616
5617 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5618 frame = NULL;
5619
5620 bp->thread = tp->num;
5621 inferior_thread ()->control.exception_resume_breakpoint = bp;
5622 }
5623 }
5624 }
5625
5626 /* A helper for check_exception_resume that sets an
5627 exception-breakpoint based on a SystemTap probe. */
5628
5629 static void
5630 insert_exception_resume_from_probe (struct thread_info *tp,
5631 const struct bound_probe *probe,
5632 struct frame_info *frame)
5633 {
5634 struct value *arg_value;
5635 CORE_ADDR handler;
5636 struct breakpoint *bp;
5637
5638 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5639 if (!arg_value)
5640 return;
5641
5642 handler = value_as_address (arg_value);
5643
5644 if (debug_infrun)
5645 fprintf_unfiltered (gdb_stdlog,
5646 "infrun: exception resume at %s\n",
5647 paddress (get_objfile_arch (probe->objfile),
5648 handler));
5649
5650 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5651 handler, bp_exception_resume);
5652 bp->thread = tp->num;
5653 inferior_thread ()->control.exception_resume_breakpoint = bp;
5654 }
5655
5656 /* This is called when an exception has been intercepted. Check to
5657 see whether the exception's destination is of interest, and if so,
5658 set an exception resume breakpoint there. */
5659
5660 static void
5661 check_exception_resume (struct execution_control_state *ecs,
5662 struct frame_info *frame)
5663 {
5664 volatile struct gdb_exception e;
5665 struct bound_probe probe;
5666 struct symbol *func;
5667
5668 /* First see if this exception unwinding breakpoint was set via a
5669 SystemTap probe point. If so, the probe has two arguments: the
5670 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5671 set a breakpoint there. */
5672 probe = find_probe_by_pc (get_frame_pc (frame));
5673 if (probe.probe)
5674 {
5675 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
5676 return;
5677 }
5678
5679 func = get_frame_function (frame);
5680 if (!func)
5681 return;
5682
5683 TRY_CATCH (e, RETURN_MASK_ERROR)
5684 {
5685 struct block *b;
5686 struct block_iterator iter;
5687 struct symbol *sym;
5688 int argno = 0;
5689
5690 /* The exception breakpoint is a thread-specific breakpoint on
5691 the unwinder's debug hook, declared as:
5692
5693 void _Unwind_DebugHook (void *cfa, void *handler);
5694
5695 The CFA argument indicates the frame to which control is
5696 about to be transferred. HANDLER is the destination PC.
5697
5698 We ignore the CFA and set a temporary breakpoint at HANDLER.
5699 This is not extremely efficient but it avoids issues in gdb
5700 with computing the DWARF CFA, and it also works even in weird
5701 cases such as throwing an exception from inside a signal
5702 handler. */
5703
5704 b = SYMBOL_BLOCK_VALUE (func);
5705 ALL_BLOCK_SYMBOLS (b, iter, sym)
5706 {
5707 if (!SYMBOL_IS_ARGUMENT (sym))
5708 continue;
5709
5710 if (argno == 0)
5711 ++argno;
5712 else
5713 {
5714 insert_exception_resume_breakpoint (ecs->event_thread,
5715 b, frame, sym);
5716 break;
5717 }
5718 }
5719 }
5720 }
5721
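/* Clear any step-over bookkeeping and let callers of
   handle_inferior_event know that we do not want to wait for the
   inferior anymore; this ends the current stepping operation.  */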
5722 static void
5723 stop_stepping (struct execution_control_state *ecs)
5724 {
5725 if (debug_infrun)
5726 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5727
5728 clear_step_over_info ();
5729
5730 /* Let callers know we don't want to wait for the inferior anymore. */
5731 ecs->wait_some_more = 0;
5732 }
5733
5734 /* Called when we should continue running the inferior, because the
5735 current event doesn't cause a user visible stop. This does the
5736 resuming part; waiting for the next event is done elsewhere. */
5737
5738 static void
5739 keep_going (struct execution_control_state *ecs)
5740 {
5741 /* Make sure normal_stop is called if we get a QUIT handled before
5742 reaching resume. */
5743 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5744
5745 /* Save the pc before execution, to compare with pc after stop. */
5746 ecs->event_thread->prev_pc
5747 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5748
5749 if (ecs->event_thread->control.trap_expected
5750 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5751 {
5752 /* We haven't yet gotten our trap, and either: intercepted a
5753 non-signal event (e.g., a fork); or took a signal which we
5754 are supposed to pass through to the inferior. Simply
5755 continue. */
5756 discard_cleanups (old_cleanups);
5757 resume (currently_stepping (ecs->event_thread),
5758 ecs->event_thread->suspend.stop_signal);
5759 }
5760 else
5761 {
5762 volatile struct gdb_exception e;
5763 struct regcache *regcache = get_current_regcache ();
5764
5765 /* Either the trap was not expected, but we are continuing
5766 anyway (if we got a signal, the user asked it be passed to
5767 the child)
5768 -- or --
5769 We got our expected trap, but decided we should resume from
5770 it.
5771
5772 We're going to run this baby now!
5773
5774 Note that insert_breakpoints won't try to re-insert
5775 already inserted breakpoints. Therefore, we don't
5776 care if breakpoints were already inserted, or not. */
5777
5778 /* If we need to step over a breakpoint, and we're not using
5779 displaced stepping to do so, insert all breakpoints
5780 (watchpoints, etc.) but the one we're stepping over, step one
5781 instruction, and then re-insert the breakpoint when that step
5782 is finished. */
5783 if ((ecs->hit_singlestep_breakpoint
5784 || thread_still_needs_step_over (ecs->event_thread))
5785 && !use_displaced_stepping (get_regcache_arch (regcache)))
5786 {
5787 set_step_over_info (get_regcache_aspace (regcache),
5788 regcache_read_pc (regcache));
5789 }
5790 else
5791 clear_step_over_info ();
5792
5793 /* Stop stepping if inserting breakpoints fails. */
5794 TRY_CATCH (e, RETURN_MASK_ERROR)
5795 {
5796 insert_breakpoints ();
5797 }
5798 if (e.reason < 0)
5799 {
5800 exception_print (gdb_stderr, e);
5801 stop_stepping (ecs);
5802 return;
5803 }
5804
5805 ecs->event_thread->control.trap_expected
5806 = (ecs->event_thread->stepping_over_breakpoint
5807 || ecs->hit_singlestep_breakpoint);
5808
5809 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
5810 explicitly specifies that such a signal should be delivered
5811 to the target program). Typically, that would occur when a
5812 user is debugging a target monitor on a simulator: the target
5813 monitor sets a breakpoint; the simulator encounters this
5814 breakpoint and halts the simulation handing control to GDB;
5815 GDB, noting that the stop address doesn't map to any known
5816 breakpoint, returns control back to the simulator; the
5817 simulator then delivers the hardware equivalent of a
5818 GDB_SIGNAL_TRAP to the program being debugged. */
5819 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5820 && !signal_program[ecs->event_thread->suspend.stop_signal])
5821 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5822
5823 discard_cleanups (old_cleanups);
5824 resume (currently_stepping (ecs->event_thread),
5825 ecs->event_thread->suspend.stop_signal);
5826 }
5827
5828 prepare_to_wait (ecs);
5829 }
5830
5831 /* This function normally comes after a resume, before
5832 handle_inferior_event exits. It takes care of any last bits of
5833 housekeeping, and sets the all-important wait_some_more flag. */
5834
5835 static void
5836 prepare_to_wait (struct execution_control_state *ecs)
5837 {
5838 if (debug_infrun)
5839 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5840
5841 /* This is the old end of the while loop. Let everybody know we
5842 want to wait for the inferior some more and get called again
5843 soon. */
5844 ecs->wait_some_more = 1;
5845 }
5846
5847 /* Several print_*_reason functions to print why the inferior has stopped.
5848 We always print something when the inferior exits, or receives a signal.
5849 The rest of the cases are dealt with later on in normal_stop and
5850 print_it_typical. Ideally there should be a call to one of these
5851 print_*_reason functions from handle_inferior_event each time
5852 stop_stepping is called. */
5853
5854 /* Print why the inferior has stopped.
5855 We are done with a step/next/si/ni command; the CLI prints nothing
5856 here, and the MI "reason" field is emitted only when we are not in
5857 the middle of doing a "step n" operation for n > 1. */
5858
5859 static void
5860 print_end_stepping_range_reason (void)
5861 {
5862 if ((!inferior_thread ()->step_multi
5863 || !inferior_thread ()->control.stop_step)
5864 && ui_out_is_mi_like_p (current_uiout))
5865 ui_out_field_string (current_uiout, "reason",
5866 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5867 }
5868
5869 /* The inferior was terminated by a signal, print why it stopped. */
5870
5871 static void
5872 print_signal_exited_reason (enum gdb_signal siggnal)
5873 {
5874 struct ui_out *uiout = current_uiout;
5875
5876 annotate_signalled ();
5877 if (ui_out_is_mi_like_p (uiout))
5878 ui_out_field_string
5879 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5880 ui_out_text (uiout, "\nProgram terminated with signal ");
5881 annotate_signal_name ();
5882 ui_out_field_string (uiout, "signal-name",
5883 gdb_signal_to_name (siggnal));
5884 annotate_signal_name_end ();
5885 ui_out_text (uiout, ", ");
5886 annotate_signal_string ();
5887 ui_out_field_string (uiout, "signal-meaning",
5888 gdb_signal_to_string (siggnal));
5889 annotate_signal_string_end ();
5890 ui_out_text (uiout, ".\n");
5891 ui_out_text (uiout, "The program no longer exists.\n");
5892 }
5893
5894 /* The inferior program is finished, print why it stopped. */
5895
5896 static void
5897 print_exited_reason (int exitstatus)
5898 {
5899 struct inferior *inf = current_inferior ();
5900 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5901 struct ui_out *uiout = current_uiout;
5902
5903 annotate_exited (exitstatus);
5904 if (exitstatus)
5905 {
5906 if (ui_out_is_mi_like_p (uiout))
5907 ui_out_field_string (uiout, "reason",
5908 async_reason_lookup (EXEC_ASYNC_EXITED));
5909 ui_out_text (uiout, "[Inferior ");
5910 ui_out_text (uiout, plongest (inf->num));
5911 ui_out_text (uiout, " (");
5912 ui_out_text (uiout, pidstr);
5913 ui_out_text (uiout, ") exited with code ");
5914 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5915 ui_out_text (uiout, "]\n");
5916 }
5917 else
5918 {
5919 if (ui_out_is_mi_like_p (uiout))
5920 ui_out_field_string
5921 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5922 ui_out_text (uiout, "[Inferior ");
5923 ui_out_text (uiout, plongest (inf->num));
5924 ui_out_text (uiout, " (");
5925 ui_out_text (uiout, pidstr);
5926 ui_out_text (uiout, ") exited normally]\n");
5927 }
5928 /* Support the --return-child-result option. */
5929 return_child_result_value = exitstatus;
5930 }
5931
5932 /* Signal received, print why the inferior has stopped. The signal table
5933 tells us to print about it. */
5934
5935 static void
5936 print_signal_received_reason (enum gdb_signal siggnal)
5937 {
5938 struct ui_out *uiout = current_uiout;
5939
5940 annotate_signal ();
5941
5942 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5943 {
5944 struct thread_info *t = inferior_thread ();
5945
5946 ui_out_text (uiout, "\n[");
5947 ui_out_field_string (uiout, "thread-name",
5948 target_pid_to_str (t->ptid));
5949 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5950 ui_out_text (uiout, " stopped");
5951 }
5952 else
5953 {
5954 ui_out_text (uiout, "\nProgram received signal ");
5955 annotate_signal_name ();
5956 if (ui_out_is_mi_like_p (uiout))
5957 ui_out_field_string
5958 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5959 ui_out_field_string (uiout, "signal-name",
5960 gdb_signal_to_name (siggnal));
5961 annotate_signal_name_end ();
5962 ui_out_text (uiout, ", ");
5963 annotate_signal_string ();
5964 ui_out_field_string (uiout, "signal-meaning",
5965 gdb_signal_to_string (siggnal));
5966 annotate_signal_string_end ();
5967 }
5968 ui_out_text (uiout, ".\n");
5969 }
5970
5971 /* Reverse execution: target ran out of history info, print why the inferior
5972 has stopped. */
5973
5974 static void
5975 print_no_history_reason (void)
5976 {
5977 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
5978 }
5979
5980 /* Print current location without a level number, if we have changed
5981 functions or hit a breakpoint. Print source line if we have one.
5982 bpstat_print contains the logic deciding in detail what to print,
5983 based on the event(s) that just occurred. */
5984
5985 void
5986 print_stop_event (struct target_waitstatus *ws)
5987 {
5988 int bpstat_ret;
5989 int source_flag;
5990 int do_frame_printing = 1;
5991 struct thread_info *tp = inferior_thread ();
5992
5993 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
5994 switch (bpstat_ret)
5995 {
5996 case PRINT_UNKNOWN:
5997 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
5998 should) carry around the function and does (or should) use
5999 that when doing a frame comparison. */
6000 if (tp->control.stop_step
6001 && frame_id_eq (tp->control.step_frame_id,
6002 get_frame_id (get_current_frame ()))
6003 && step_start_function == find_pc_function (stop_pc))
6004 {
6005 /* Finished step, just print source line. */
6006 source_flag = SRC_LINE;
6007 }
6008 else
6009 {
6010 /* Print location and source line. */
6011 source_flag = SRC_AND_LOC;
6012 }
6013 break;
6014 case PRINT_SRC_AND_LOC:
6015 /* Print location and source line. */
6016 source_flag = SRC_AND_LOC;
6017 break;
6018 case PRINT_SRC_ONLY:
6019 source_flag = SRC_LINE;
6020 break;
6021 case PRINT_NOTHING:
6022 /* Something bogus. */
6023 source_flag = SRC_LINE;
6024 do_frame_printing = 0;
6025 break;
6026 default:
6027 internal_error (__FILE__, __LINE__, _("Unknown value."));
6028 }
6029
6030 /* The behavior of this routine with respect to the source
6031 flag is:
6032 SRC_LINE: Print only source line
6033 LOCATION: Print only location
6034 SRC_AND_LOC: Print location and source line. */
6035 if (do_frame_printing)
6036 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6037
6038 /* Display the auto-display expressions. */
6039 do_displays ();
6040 }
6041
6042 /* Here to return control to GDB when the inferior stops for real.
6043 Print appropriate messages, remove breakpoints, give terminal our modes.
6044
6045 STOP_PRINT_FRAME nonzero means print the executing frame
6046 (pc, function, args, file, line number and line text).
6047 BREAKPOINTS_FAILED nonzero means stop was due to error
6048 attempting to insert breakpoints. */
6049
6050 void
6051 normal_stop (void)
6052 {
6053 struct target_waitstatus last;
6054 ptid_t last_ptid;
6055 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6056
6057 get_last_target_status (&last_ptid, &last);
6058
6059 /* If an exception is thrown from this point on, make sure to
6060 propagate GDB's knowledge of the executing state to the
6061 frontend/user running state. A QUIT is an easy exception to see
6062 here, so do this before any filtered output. */
6063 if (!non_stop)
6064 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6065 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6066 && last.kind != TARGET_WAITKIND_EXITED
6067 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6068 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6069
6070 /* As with the notification of thread events, we want to delay
6071 notifying the user that we've switched thread context until
6072 the inferior actually stops.
6073
6074 There's no point in saying anything if the inferior has exited.
6075 Note that SIGNALLED here means "exited with a signal", not
6076 "received a signal".
6077
6078 Also skip saying anything in non-stop mode. In that mode, as we
6079 don't want GDB to switch threads behind the user's back, to avoid
6080 races where the user is typing a command to apply to thread x,
6081 but GDB switches to thread y before the user finishes entering
6082 the command, fetch_inferior_event installs a cleanup to restore
6083 the current thread back to the thread the user had selected right
6084 after this event is handled, so we're not really switching, only
6085 informing of a stop. */
6086 if (!non_stop
6087 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6088 && target_has_execution
6089 && last.kind != TARGET_WAITKIND_SIGNALLED
6090 && last.kind != TARGET_WAITKIND_EXITED
6091 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6092 {
6093 target_terminal_ours_for_output ();
6094 printf_filtered (_("[Switching to %s]\n"),
6095 target_pid_to_str (inferior_ptid));
6096 annotate_thread_changed ();
6097 previous_inferior_ptid = inferior_ptid;
6098 }
6099
6100 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6101 {
6102 gdb_assert (sync_execution || !target_can_async_p ());
6103
6104 target_terminal_ours_for_output ();
6105 printf_filtered (_("No unwaited-for children left.\n"));
6106 }
6107
6108 if (!breakpoints_always_inserted_mode () && target_has_execution)
6109 {
6110 if (remove_breakpoints ())
6111 {
6112 target_terminal_ours_for_output ();
6113 printf_filtered (_("Cannot remove breakpoints because "
6114 "program is no longer writable.\nFurther "
6115 "execution is probably impossible.\n"));
6116 }
6117 }
6118
6119 /* If an auto-display called a function and that got a signal,
6120 delete that auto-display to avoid an infinite recursion. */
6121
6122 if (stopped_by_random_signal)
6123 disable_current_display ();
6124
6125 /* Don't print a message if in the middle of doing a "step n"
6126 operation for n > 1 */
6127 if (target_has_execution
6128 && last.kind != TARGET_WAITKIND_SIGNALLED
6129 && last.kind != TARGET_WAITKIND_EXITED
6130 && inferior_thread ()->step_multi
6131 && inferior_thread ()->control.stop_step)
6132 goto done;
6133
6134 target_terminal_ours ();
6135 async_enable_stdin ();
6136
6137 /* Set the current source location. This will also happen if we
6138 display the frame below, but the current SAL will be incorrect
6139 during a user hook-stop function. */
6140 if (has_stack_frames () && !stop_stack_dummy)
6141 set_current_sal_from_frame (get_current_frame (), 1);
6142
6143 /* Let the user/frontend see the threads as stopped. */
6144 do_cleanups (old_chain);
6145
6146 /* Look up the hook_stop and run it (CLI internally handles problem
6147 of stop_command's pre-hook not existing). */
6148 if (stop_command)
6149 catch_errors (hook_stop_stub, stop_command,
6150 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6151
6152 if (!has_stack_frames ())
6153 goto done;
6154
6155 if (last.kind == TARGET_WAITKIND_SIGNALLED
6156 || last.kind == TARGET_WAITKIND_EXITED)
6157 goto done;
6158
6159 /* Select innermost stack frame - i.e., current frame is frame 0,
6160 and current location is based on that.
6161 Don't do this on return from a stack dummy routine,
6162 or if the program has exited. */
6163
6164 if (!stop_stack_dummy)
6165 {
6166 select_frame (get_current_frame ());
6167
6168 /* If --batch-silent is enabled then there's no need to print the current
6169 source location, and trying to do so risks causing an error message about
6170 missing source files. */
6171 if (stop_print_frame && !batch_silent)
6172 print_stop_event (&last);
6173 }
6174
6175 /* Save the function value return registers, if we care.
6176 We might be about to restore their previous contents. */
6177 if (inferior_thread ()->control.proceed_to_finish
6178 && execution_direction != EXEC_REVERSE)
6179 {
6180 /* This should not be necessary. */
6181 if (stop_registers)
6182 regcache_xfree (stop_registers);
6183
6184 /* NB: The copy goes through to the target picking up the value of
6185 all the registers. */
6186 stop_registers = regcache_dup (get_current_regcache ());
6187 }
6188
6189 if (stop_stack_dummy == STOP_STACK_DUMMY)
6190 {
6191 /* Pop the empty frame that contains the stack dummy.
6192 This also restores inferior state prior to the call
6193 (struct infcall_suspend_state). */
6194 struct frame_info *frame = get_current_frame ();
6195
6196 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6197 frame_pop (frame);
6198 /* frame_pop() calls reinit_frame_cache as the last thing it
6199 does which means there's currently no selected frame. We
6200 don't need to re-establish a selected frame if the dummy call
6201 returns normally, that will be done by
6202 restore_infcall_control_state. However, we do have to handle
6203 the case where the dummy call is returning after being
6204 stopped (e.g. the dummy call previously hit a breakpoint).
6205 We can't know which case we have so just always re-establish
6206 a selected frame here. */
6207 select_frame (get_current_frame ());
6208 }
6209
6210 done:
6211 annotate_stopped ();
6212
6213 /* Suppress the stop observer if we're in the middle of:
6214
6215 - a step n (n > 1), as there are still more steps to be done.
6216
6217 - a "finish" command, as the observer will be called in
6218 finish_command_continuation, so it can include the inferior
6219 function's return value.
6220
6221 - calling an inferior function, as we pretend the inferior didn't
6222 run at all. The return value of the call is handled by the
6223 expression evaluator, through call_function_by_hand. */
6224
6225 if (!target_has_execution
6226 || last.kind == TARGET_WAITKIND_SIGNALLED
6227 || last.kind == TARGET_WAITKIND_EXITED
6228 || last.kind == TARGET_WAITKIND_NO_RESUMED
6229 || (!(inferior_thread ()->step_multi
6230 && inferior_thread ()->control.stop_step)
6231 && !(inferior_thread ()->control.stop_bpstat
6232 && inferior_thread ()->control.proceed_to_finish)
6233 && !inferior_thread ()->control.in_infcall))
6234 {
6235 if (!ptid_equal (inferior_ptid, null_ptid))
6236 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6237 stop_print_frame);
6238 else
6239 observer_notify_normal_stop (NULL, stop_print_frame);
6240 }
6241
6242 if (target_has_execution)
6243 {
6244 if (last.kind != TARGET_WAITKIND_SIGNALLED
6245 && last.kind != TARGET_WAITKIND_EXITED)
6246 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6247 Delete any breakpoint that is to be deleted at the next stop. */
6248 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6249 }
6250
6251 /* Try to get rid of automatically added inferiors that are no
6252 longer needed. Keeping those around slows down things linearly.
6253 Note that this never removes the current inferior. */
6254 prune_inferiors ();
6255 }
6256
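/* catch_errors callback that runs the pre-hook of the command passed
   in CMD; used above with stop_command to run the user's hook-stop.  */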
6257 static int
6258 hook_stop_stub (void *cmd)
6259 {
6260 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6261 return (0);
6262 }
6263 \f
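/* The next three functions are simple accessors for the signal
   handling tables: whether GDB stops on, prints, or passes signal
   SIGNO to the program.  */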
6264 int
6265 signal_stop_state (int signo)
6266 {
6267 return signal_stop[signo];
6268 }
6269
6270 int
6271 signal_print_state (int signo)
6272 {
6273 return signal_print[signo];
6274 }
6275
6276 int
6277 signal_pass_state (int signo)
6278 {
6279 return signal_program[signo];
6280 }
6281
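/* Recompute the cached signal_pass entry for SIGNO from the stop,
   print, program and catch tables.  With SIGNO == -1, refresh the
   cache for every signal.  */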
6282 static void
6283 signal_cache_update (int signo)
6284 {
6285 if (signo == -1)
6286 {
6287 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6288 signal_cache_update (signo);
6289
6290 return;
6291 }
6292
6293 signal_pass[signo] = (signal_stop[signo] == 0
6294 && signal_print[signo] == 0
6295 && signal_program[signo] == 1
6296 && signal_catch[signo] == 0);
6297 }
6298
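/* The following three functions each update one entry of the stop,
   print, or pass-to-program table for SIGNO, refresh the signal_pass
   cache, and return the previous setting.  */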
6299 int
6300 signal_stop_update (int signo, int state)
6301 {
6302 int ret = signal_stop[signo];
6303
6304 signal_stop[signo] = state;
6305 signal_cache_update (signo);
6306 return ret;
6307 }
6308
6309 int
6310 signal_print_update (int signo, int state)
6311 {
6312 int ret = signal_print[signo];
6313
6314 signal_print[signo] = state;
6315 signal_cache_update (signo);
6316 return ret;
6317 }
6318
6319 int
6320 signal_pass_update (int signo, int state)
6321 {
6322 int ret = signal_program[signo];
6323
6324 signal_program[signo] = state;
6325 signal_cache_update (signo);
6326 return ret;
6327 }
6328
6329 /* Update the global 'signal_catch' from INFO and notify the
6330 target. */
6331
6332 void
6333 signal_catch_update (const unsigned int *info)
6334 {
6335 int i;
6336
6337 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6338 signal_catch[i] = info[i] > 0;
6339 signal_cache_update (-1);
6340 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6341 }
6342
6343 static void
6344 sig_print_header (void)
6345 {
6346 printf_filtered (_("Signal Stop\tPrint\tPass "
6347 "to program\tDescription\n"));
6348 }
6349
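/* Print one row of the signal table for OURSIG: its name, the
   stop/print/pass settings, and a short description.  */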
6350 static void
6351 sig_print_info (enum gdb_signal oursig)
6352 {
6353 const char *name = gdb_signal_to_name (oursig);
6354 int name_padding = 13 - strlen (name);
6355
6356 if (name_padding <= 0)
6357 name_padding = 0;
6358
6359 printf_filtered ("%s", name);
6360 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6361 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6362 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6363 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6364 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6365 }
6366
6367 /* Specify how various signals in the inferior should be handled. */
6368
6369 static void
6370 handle_command (char *args, int from_tty)
6371 {
6372 char **argv;
6373 int digits, wordlen;
6374 int sigfirst, signum, siglast;
6375 enum gdb_signal oursig;
6376 int allsigs;
6377 int nsigs;
6378 unsigned char *sigs;
6379 struct cleanup *old_chain;
6380
6381 if (args == NULL)
6382 {
6383 error_no_arg (_("signal to handle"));
6384 }
6385
6386 /* Allocate and zero an array of flags for which signals to handle. */
6387
6388 nsigs = (int) GDB_SIGNAL_LAST;
6389 sigs = (unsigned char *) alloca (nsigs);
6390 memset (sigs, 0, nsigs);
6391
6392 /* Break the command line up into args. */
6393
6394 argv = gdb_buildargv (args);
6395 old_chain = make_cleanup_freeargv (argv);
6396
6397 /* Walk through the args, looking for signal oursigs, signal names, and
6398 actions. Signal numbers and signal names may be interspersed with
6399 actions, with the actions being performed for all signals cumulatively
6400 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
6401
6402 while (*argv != NULL)
6403 {
6404 wordlen = strlen (*argv);
6405 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6406 {;
6407 }
6408 allsigs = 0;
6409 sigfirst = siglast = -1;
6410
6411 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6412 {
6413 /* Apply action to all signals except those used by the
6414 debugger. Silently skip those. */
6415 allsigs = 1;
6416 sigfirst = 0;
6417 siglast = nsigs - 1;
6418 }
6419 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6420 {
6421 SET_SIGS (nsigs, sigs, signal_stop);
6422 SET_SIGS (nsigs, sigs, signal_print);
6423 }
6424 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6425 {
6426 UNSET_SIGS (nsigs, sigs, signal_program);
6427 }
6428 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6429 {
6430 SET_SIGS (nsigs, sigs, signal_print);
6431 }
6432 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6433 {
6434 SET_SIGS (nsigs, sigs, signal_program);
6435 }
6436 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6437 {
6438 UNSET_SIGS (nsigs, sigs, signal_stop);
6439 }
6440 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6441 {
6442 SET_SIGS (nsigs, sigs, signal_program);
6443 }
6444 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6445 {
6446 UNSET_SIGS (nsigs, sigs, signal_print);
6447 UNSET_SIGS (nsigs, sigs, signal_stop);
6448 }
6449 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6450 {
6451 UNSET_SIGS (nsigs, sigs, signal_program);
6452 }
6453 else if (digits > 0)
6454 {
6455 /* It is numeric. The numeric signal refers to our own
6456 internal signal numbering from target.h, not to host/target
6457 signal number. This is a feature; users really should be
6458 using symbolic names anyway, and the common ones like
6459 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6460
6461 sigfirst = siglast = (int)
6462 gdb_signal_from_command (atoi (*argv));
6463 if ((*argv)[digits] == '-')
6464 {
6465 siglast = (int)
6466 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6467 }
6468 if (sigfirst > siglast)
6469 {
6470 /* Bet he didn't figure we'd think of this case... */
6471 signum = sigfirst;
6472 sigfirst = siglast;
6473 siglast = signum;
6474 }
6475 }
6476 else
6477 {
6478 oursig = gdb_signal_from_name (*argv);
6479 if (oursig != GDB_SIGNAL_UNKNOWN)
6480 {
6481 sigfirst = siglast = (int) oursig;
6482 }
6483 else
6484 {
6485 /* Not a number and not a recognized flag word => complain. */
6486 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6487 }
6488 }
6489
6490 /* If any signal numbers or symbol names were found, set flags for
6491 which signals to apply actions to. */
6492
6493 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6494 {
6495 switch ((enum gdb_signal) signum)
6496 {
6497 case GDB_SIGNAL_TRAP:
6498 case GDB_SIGNAL_INT:
6499 if (!allsigs && !sigs[signum])
6500 {
6501 if (query (_("%s is used by the debugger.\n\
6502 Are you sure you want to change it? "),
6503 gdb_signal_to_name ((enum gdb_signal) signum)))
6504 {
6505 sigs[signum] = 1;
6506 }
6507 else
6508 {
6509 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6510 gdb_flush (gdb_stdout);
6511 }
6512 }
6513 break;
6514 case GDB_SIGNAL_0:
6515 case GDB_SIGNAL_DEFAULT:
6516 case GDB_SIGNAL_UNKNOWN:
6517 /* Make sure that "all" doesn't print these. */
6518 break;
6519 default:
6520 sigs[signum] = 1;
6521 break;
6522 }
6523 }
6524
6525 argv++;
6526 }
6527
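  /* If any signal's handling was changed, push the updated tables to
     the target once and, when interactive, list every signal that was
     touched.  */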
6528 for (signum = 0; signum < nsigs; signum++)
6529 if (sigs[signum])
6530 {
6531 signal_cache_update (-1);
6532 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6533 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6534
6535 if (from_tty)
6536 {
6537 /* Show the results. */
6538 sig_print_header ();
6539 for (; signum < nsigs; signum++)
6540 if (sigs[signum])
6541 sig_print_info (signum);
6542 }
6543
6544 break;
6545 }
6546
6547 do_cleanups (old_chain);
6548 }
6549
6550 /* Complete the "handle" command. */
6551
6552 static VEC (char_ptr) *
6553 handle_completer (struct cmd_list_element *ignore,
6554 const char *text, const char *word)
6555 {
6556 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6557 static const char * const keywords[] =
6558 {
6559 "all",
6560 "stop",
6561 "ignore",
6562 "print",
6563 "pass",
6564 "nostop",
6565 "noignore",
6566 "noprint",
6567 "nopass",
6568 NULL,
6569 };
6570
6571 vec_signals = signal_completer (ignore, text, word);
6572 vec_keywords = complete_on_enum (keywords, word, word);
6573
6574 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6575 VEC_free (char_ptr, vec_signals);
6576 VEC_free (char_ptr, vec_keywords);
6577 return return_val;
6578 }
6579
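/* XDB-compatibility front end for the "handle" command: translate
   "SIGNAL s|i|r|Q" into the corresponding handle arguments (stop,
   pass, print, or noprint toggles) and run handle_command.  */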
6580 static void
6581 xdb_handle_command (char *args, int from_tty)
6582 {
6583 char **argv;
6584 struct cleanup *old_chain;
6585
6586 if (args == NULL)
6587 error_no_arg (_("xdb command"));
6588
6589 /* Break the command line up into args. */
6590
6591 argv = gdb_buildargv (args);
6592 old_chain = make_cleanup_freeargv (argv);
6593 if (argv[1] != (char *) NULL)
6594 {
6595 char *argBuf;
6596 int bufLen;
6597
6598 bufLen = strlen (argv[0]) + 20;
6599 argBuf = (char *) xmalloc (bufLen);
6600 if (argBuf)
6601 {
6602 int validFlag = 1;
6603 enum gdb_signal oursig;
6604
6605 oursig = gdb_signal_from_name (argv[0]);
6606 memset (argBuf, 0, bufLen);
6607 if (strcmp (argv[1], "Q") == 0)
6608 sprintf (argBuf, "%s %s", argv[0], "noprint");
6609 else
6610 {
6611 if (strcmp (argv[1], "s") == 0)
6612 {
6613 if (!signal_stop[oursig])
6614 sprintf (argBuf, "%s %s", argv[0], "stop");
6615 else
6616 sprintf (argBuf, "%s %s", argv[0], "nostop");
6617 }
6618 else if (strcmp (argv[1], "i") == 0)
6619 {
6620 if (!signal_program[oursig])
6621 sprintf (argBuf, "%s %s", argv[0], "pass");
6622 else
6623 sprintf (argBuf, "%s %s", argv[0], "nopass");
6624 }
6625 else if (strcmp (argv[1], "r") == 0)
6626 {
6627 if (!signal_print[oursig])
6628 sprintf (argBuf, "%s %s", argv[0], "print");
6629 else
6630 sprintf (argBuf, "%s %s", argv[0], "noprint");
6631 }
6632 else
6633 validFlag = 0;
6634 }
6635 if (validFlag)
6636 handle_command (argBuf, from_tty);
6637 else
6638 printf_filtered (_("Invalid signal handling flag.\n"));
6640 	  xfree (argBuf);
6641 }
6642 }
6643 do_cleanups (old_chain);
6644 }
6645
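/* Translate the numeric signal argument NUM of a user command into a
   gdb_signal.  Only the traditional numbers 1-15 are accepted; anything
   else is rejected with an error suggesting "info signals".  */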
6646 enum gdb_signal
6647 gdb_signal_from_command (int num)
6648 {
6649 if (num >= 1 && num <= 15)
6650 return (enum gdb_signal) num;
6651 error (_("Only signals 1-15 are valid as numeric signals.\n\
6652 Use \"info signals\" for a list of symbolic signals."));
6653 }
6654
6655 /* Print current contents of the tables set by the handle command.
6656 It is possible we should just be printing signals actually used
6657 by the current target (but for things to work right when switching
6658 targets, all signals should be in the signal tables). */
6659
6660 static void
6661 signals_info (char *signum_exp, int from_tty)
6662 {
6663 enum gdb_signal oursig;
6664
6665 sig_print_header ();
6666
6667 if (signum_exp)
6668 {
6669 /* First see if this is a symbol name. */
6670 oursig = gdb_signal_from_name (signum_exp);
6671 if (oursig == GDB_SIGNAL_UNKNOWN)
6672 {
6673 /* No, try numeric. */
6674 oursig =
6675 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6676 }
6677 sig_print_info (oursig);
6678 return;
6679 }
6680
6681 printf_filtered ("\n");
6682 /* These ugly casts brought to you by the native VAX compiler. */
6683 for (oursig = GDB_SIGNAL_FIRST;
6684 (int) oursig < (int) GDB_SIGNAL_LAST;
6685 oursig = (enum gdb_signal) ((int) oursig + 1))
6686 {
6687 QUIT;
6688
6689 if (oursig != GDB_SIGNAL_UNKNOWN
6690 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6691 sig_print_info (oursig);
6692 }
6693
6694 printf_filtered (_("\nUse the \"handle\" command "
6695 "to change these tables.\n"));
6696 }
6697
6698 /* Check if it makes sense to read $_siginfo from the current thread
6699 at this point. If not, throw an error. */
6700
6701 static void
6702 validate_siginfo_access (void)
6703 {
6704 /* No current inferior, no siginfo. */
6705 if (ptid_equal (inferior_ptid, null_ptid))
6706 error (_("No thread selected."));
6707
6708 /* Don't try to read from a dead thread. */
6709 if (is_exited (inferior_ptid))
6710     error (_("The current thread has terminated."));
6711
6712 /* ... or from a spinning thread. */
6713 if (is_running (inferior_ptid))
6714 error (_("Selected thread is running."));
6715 }
6716
6717 /* The $_siginfo convenience variable is a bit special. We don't know
6718 for sure the type of the value until we actually have a chance to
6719 fetch the data. The type can change depending on gdbarch, so it is
6720    also dependent on which thread you have selected.  We handle this by:
6721
6722      1. making $_siginfo be an internalvar that creates a new value on
6723      access; and
6724
6725      2. making the value of $_siginfo be an lval_computed value.  */
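
/* For example (an illustrative session, not part of the original
   sources), on a GNU/Linux target the pending signal information can
   be inspected with

       (gdb) print $_siginfo.si_signo
       (gdb) print $_siginfo._sifields._sigfault.si_addr

   and, on targets that allow writing TARGET_OBJECT_SIGNAL_INFO, even
   modified with "set $_siginfo.si_signo = ...".  The available field
   names depend on the siginfo type provided by the current gdbarch.  */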
6726
6727 /* This function implements the lval_computed support for reading a
6728 $_siginfo value. */
6729
6730 static void
6731 siginfo_value_read (struct value *v)
6732 {
6733 LONGEST transferred;
6734
6735 validate_siginfo_access ();
6736
6737 transferred =
6738 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6739 NULL,
6740 value_contents_all_raw (v),
6741 value_offset (v),
6742 TYPE_LENGTH (value_type (v)));
6743
6744 if (transferred != TYPE_LENGTH (value_type (v)))
6745 error (_("Unable to read siginfo"));
6746 }
6747
6748 /* This function implements the lval_computed support for writing a
6749 $_siginfo value. */
6750
6751 static void
6752 siginfo_value_write (struct value *v, struct value *fromval)
6753 {
6754 LONGEST transferred;
6755
6756 validate_siginfo_access ();
6757
6758 transferred = target_write (&current_target,
6759 TARGET_OBJECT_SIGNAL_INFO,
6760 NULL,
6761 value_contents_all_raw (fromval),
6762 value_offset (v),
6763 TYPE_LENGTH (value_type (fromval)));
6764
6765 if (transferred != TYPE_LENGTH (value_type (fromval)))
6766 error (_("Unable to write siginfo"));
6767 }
6768
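/* The lval_computed hooks binding the read/write functions above to
   values of $_siginfo.  */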
6769 static const struct lval_funcs siginfo_value_funcs =
6770 {
6771 siginfo_value_read,
6772 siginfo_value_write
6773 };
6774
6775 /* Return a new value with the correct type for the siginfo object of
6776 the current thread using architecture GDBARCH. Return a void value
6777 if there's no object available. */
6778
6779 static struct value *
6780 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
6781 void *ignore)
6782 {
6783 if (target_has_stack
6784 && !ptid_equal (inferior_ptid, null_ptid)
6785 && gdbarch_get_siginfo_type_p (gdbarch))
6786 {
6787 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6788
6789 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6790 }
6791
6792 return allocate_value (builtin_type (gdbarch)->builtin_void);
6793 }
6794
6795 \f
6796 /* infcall_suspend_state contains state about the program itself like its
6797 registers and any signal it received when it last stopped.
6798 This state must be restored regardless of how the inferior function call
6799 ends (either successfully, or after it hits a breakpoint or signal)
6800 if the program is to properly continue where it left off. */
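
/* Typical usage, as an illustrative sketch only (it is not copied from
   any particular GDB caller):

       struct infcall_suspend_state *st = save_infcall_suspend_state ();
       struct cleanup *back_to
	 = make_cleanup_restore_infcall_suspend_state (st);
       ...set up and run the inferior call...
       do_cleanups (back_to);

   Running the cleanup restores the registers, stop signal and siginfo
   saved above; if the state produced by the call should be kept
   instead, use discard_cleanups followed by
   discard_infcall_suspend_state.  */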
6801
6802 struct infcall_suspend_state
6803 {
6804 struct thread_suspend_state thread_suspend;
6805 #if 0 /* Currently unused and empty structures are not valid C. */
6806 struct inferior_suspend_state inferior_suspend;
6807 #endif
6808
6809 /* Other fields: */
6810 CORE_ADDR stop_pc;
6811 struct regcache *registers;
6812
6813   /* The gdbarch defining the layout of SIGINFO_DATA, or NULL if no siginfo was saved.  */
6814 struct gdbarch *siginfo_gdbarch;
6815
6816   /* Raw siginfo data in the layout dictated by SIGINFO_GDBARCH; its
6817      length is TYPE_LENGTH (gdbarch_get_siginfo_type ()).  The content
6818      is only valid for that same gdbarch.  */
6819 gdb_byte *siginfo_data;
6820 };
6821
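/* Save the state of the program (stop signal, registers and, when the
   architecture provides a siginfo type, the raw siginfo data) so that
   it can be restored after an inferior function call.  The caller owns
   the returned object and must eventually restore or discard it.  */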
6822 struct infcall_suspend_state *
6823 save_infcall_suspend_state (void)
6824 {
6825 struct infcall_suspend_state *inf_state;
6826 struct thread_info *tp = inferior_thread ();
6827 #if 0
6828 struct inferior *inf = current_inferior ();
6829 #endif
6830 struct regcache *regcache = get_current_regcache ();
6831 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6832 gdb_byte *siginfo_data = NULL;
6833
6834 if (gdbarch_get_siginfo_type_p (gdbarch))
6835 {
6836 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6837 size_t len = TYPE_LENGTH (type);
6838 struct cleanup *back_to;
6839
6840 siginfo_data = xmalloc (len);
6841 back_to = make_cleanup (xfree, siginfo_data);
6842
6843 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6844 siginfo_data, 0, len) == len)
6845 discard_cleanups (back_to);
6846 else
6847 {
6848 /* Errors ignored. */
6849 do_cleanups (back_to);
6850 siginfo_data = NULL;
6851 }
6852 }
6853
6854 inf_state = XCNEW (struct infcall_suspend_state);
6855
6856 if (siginfo_data)
6857 {
6858 inf_state->siginfo_gdbarch = gdbarch;
6859 inf_state->siginfo_data = siginfo_data;
6860 }
6861
6862 inf_state->thread_suspend = tp->suspend;
6863 #if 0 /* Currently unused and empty structures are not valid C. */
6864 inf_state->inferior_suspend = inf->suspend;
6865 #endif
6866
6867   /* run_inferior_call will not deliver this signal anyway, since it
6868      calls `proceed' with GDB_SIGNAL_0.  */
6869 tp->suspend.stop_signal = GDB_SIGNAL_0;
6870
6871 inf_state->stop_pc = stop_pc;
6872
6873 inf_state->registers = regcache_dup (regcache);
6874
6875 return inf_state;
6876 }
6877
6878 /* Restore inferior session state to INF_STATE. */
6879
6880 void
6881 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6882 {
6883 struct thread_info *tp = inferior_thread ();
6884 #if 0
6885 struct inferior *inf = current_inferior ();
6886 #endif
6887 struct regcache *regcache = get_current_regcache ();
6888 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6889
6890 tp->suspend = inf_state->thread_suspend;
6891 #if 0 /* Currently unused and empty structures are not valid C. */
6892 inf->suspend = inf_state->inferior_suspend;
6893 #endif
6894
6895 stop_pc = inf_state->stop_pc;
6896
6897 if (inf_state->siginfo_gdbarch == gdbarch)
6898 {
6899 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6900
6901 /* Errors ignored. */
6902 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6903 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
6904 }
6905
6906 /* The inferior can be gone if the user types "print exit(0)"
6907 (and perhaps other times). */
6908 if (target_has_execution)
6909 /* NB: The register write goes through to the target. */
6910 regcache_cpy (regcache, inf_state->registers);
6911
6912 discard_infcall_suspend_state (inf_state);
6913 }
6914
6915 static void
6916 do_restore_infcall_suspend_state_cleanup (void *state)
6917 {
6918 restore_infcall_suspend_state (state);
6919 }
6920
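/* Register a cleanup that restores (and thereby frees) INF_STATE when
   it is run.  */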
6921 struct cleanup *
6922 make_cleanup_restore_infcall_suspend_state
6923 (struct infcall_suspend_state *inf_state)
6924 {
6925 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6926 }
6927
6928 void
6929 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6930 {
6931 regcache_xfree (inf_state->registers);
6932 xfree (inf_state->siginfo_data);
6933 xfree (inf_state);
6934 }
6935
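/* Return the regcache saved in INF_STATE.  The regcache is still owned
   by INF_STATE and is freed when the state is restored or discarded.  */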
6936 struct regcache *
6937 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6938 {
6939 return inf_state->registers;
6940 }
6941
6942 /* infcall_control_state contains state regarding gdb's control of the
6943 inferior itself like stepping control. It also contains session state like
6944 the user's currently selected frame. */
6945
6946 struct infcall_control_state
6947 {
6948 struct thread_control_state thread_control;
6949 struct inferior_control_state inferior_control;
6950
6951 /* Other fields: */
6952 enum stop_stack_kind stop_stack_dummy;
6953 int stopped_by_random_signal;
6954 int stop_after_trap;
6955
6956   /* ID of the selected frame when the inferior function call was made.  */
6957 struct frame_id selected_frame_id;
6958 };
6959
6960 /* Save all of the information associated with the inferior<==>gdb
6961 connection. */
6962
6963 struct infcall_control_state *
6964 save_infcall_control_state (void)
6965 {
6966 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6967 struct thread_info *tp = inferior_thread ();
6968 struct inferior *inf = current_inferior ();
6969
6970 inf_status->thread_control = tp->control;
6971 inf_status->inferior_control = inf->control;
6972
6973 tp->control.step_resume_breakpoint = NULL;
6974 tp->control.exception_resume_breakpoint = NULL;
6975
6976 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6977 chain. If caller's caller is walking the chain, they'll be happier if we
6978 hand them back the original chain when restore_infcall_control_state is
6979 called. */
6980 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6981
6982 /* Other fields: */
6983 inf_status->stop_stack_dummy = stop_stack_dummy;
6984 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6985 inf_status->stop_after_trap = stop_after_trap;
6986
6987 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6988
6989 return inf_status;
6990 }
6991
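/* Helper for catch_errors: reselect the frame identified by the
   frame_id pointed to by ARGS.  Return 1 on success, 0 if the frame
   can no longer be found.  */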
6992 static int
6993 restore_selected_frame (void *args)
6994 {
6995 struct frame_id *fid = (struct frame_id *) args;
6996 struct frame_info *frame;
6997
6998 frame = frame_find_by_id (*fid);
6999
7000   /* If frame_find_by_id () fails, the frame selected before the
7001      inferior call can no longer be found (e.g. the stack was clobbered).  */
7002 if (frame == NULL)
7003 {
7004 warning (_("Unable to restore previously selected frame."));
7005 return 0;
7006 }
7007
7008 select_frame (frame);
7009
7010 return (1);
7011 }
7012
7013 /* Restore inferior session state to INF_STATUS. */
7014
7015 void
7016 restore_infcall_control_state (struct infcall_control_state *inf_status)
7017 {
7018 struct thread_info *tp = inferior_thread ();
7019 struct inferior *inf = current_inferior ();
7020
7021 if (tp->control.step_resume_breakpoint)
7022 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7023
7024 if (tp->control.exception_resume_breakpoint)
7025 tp->control.exception_resume_breakpoint->disposition
7026 = disp_del_at_next_stop;
7027
7028   /* Free the bpstat copy installed by save_infcall_control_state; the original chain is restored below via TP->control.  */
7029 bpstat_clear (&tp->control.stop_bpstat);
7030
7031 tp->control = inf_status->thread_control;
7032 inf->control = inf_status->inferior_control;
7033
7034 /* Other fields: */
7035 stop_stack_dummy = inf_status->stop_stack_dummy;
7036 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7037 stop_after_trap = inf_status->stop_after_trap;
7038
7039 if (target_has_stack)
7040 {
7041 /* The point of catch_errors is that if the stack is clobbered,
7042 walking the stack might encounter a garbage pointer and
7043 error() trying to dereference it. */
7044 if (catch_errors
7045 (restore_selected_frame, &inf_status->selected_frame_id,
7046 "Unable to restore previously selected frame:\n",
7047 RETURN_MASK_ERROR) == 0)
7048 /* Error in restoring the selected frame. Select the innermost
7049 frame. */
7050 select_frame (get_current_frame ());
7051 }
7052
7053 xfree (inf_status);
7054 }
7055
7056 static void
7057 do_restore_infcall_control_state_cleanup (void *sts)
7058 {
7059 restore_infcall_control_state (sts);
7060 }
7061
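/* Register a cleanup that restores (and frees) INF_STATUS when run.  */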
7062 struct cleanup *
7063 make_cleanup_restore_infcall_control_state
7064 (struct infcall_control_state *inf_status)
7065 {
7066 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7067 }
7068
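/* Throw away INF_STATUS without restoring it, releasing the resources
   it holds; any saved step/exception resume breakpoints are scheduled
   for deletion at the next stop.  */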
7069 void
7070 discard_infcall_control_state (struct infcall_control_state *inf_status)
7071 {
7072 if (inf_status->thread_control.step_resume_breakpoint)
7073 inf_status->thread_control.step_resume_breakpoint->disposition
7074 = disp_del_at_next_stop;
7075
7076 if (inf_status->thread_control.exception_resume_breakpoint)
7077 inf_status->thread_control.exception_resume_breakpoint->disposition
7078 = disp_del_at_next_stop;
7079
7080 /* See save_infcall_control_state for info on stop_bpstat. */
7081 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7082
7083 xfree (inf_status);
7084 }
7085 \f
7086 /* restore_inferior_ptid() will be used by the cleanup machinery
7087 to restore the inferior_ptid value saved in a call to
7088 save_inferior_ptid(). */
7089
7090 static void
7091 restore_inferior_ptid (void *arg)
7092 {
7093 ptid_t *saved_ptid_ptr = arg;
7094
7095 inferior_ptid = *saved_ptid_ptr;
7096 xfree (arg);
7097 }
7098
7099 /* Save the value of inferior_ptid so that it may be restored by a
7100 later call to do_cleanups(). Returns the struct cleanup pointer
7101 needed for later doing the cleanup. */
7102
7103 struct cleanup *
7104 save_inferior_ptid (void)
7105 {
7106 ptid_t *saved_ptid_ptr;
7107
7108 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7109 *saved_ptid_ptr = inferior_ptid;
7110 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7111 }
7112
7113 /* See inferior.h. */
7114
7115 void
7116 clear_exit_convenience_vars (void)
7117 {
7118 clear_internalvar (lookup_internalvar ("_exitsignal"));
7119 clear_internalvar (lookup_internalvar ("_exitcode"));
7120 }
7121 \f
7122
7123 /* User interface for reverse debugging:
7124 Set exec-direction / show exec-direction commands
7125    (returns an error unless the target supports reverse execution).  */
7126
7127 int execution_direction = EXEC_FORWARD;
7128 static const char exec_forward[] = "forward";
7129 static const char exec_reverse[] = "reverse";
7130 static const char *exec_direction = exec_forward;
7131 static const char *const exec_direction_names[] = {
7132 exec_forward,
7133 exec_reverse,
7134 NULL
7135 };
7136
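/* Hook for "set exec-direction": refuse to switch direction if the
   target cannot execute in reverse, and keep the setting at "forward"
   in that case.  */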
7137 static void
7138 set_exec_direction_func (char *args, int from_tty,
7139 struct cmd_list_element *cmd)
7140 {
7141 if (target_can_execute_reverse)
7142 {
7143 if (!strcmp (exec_direction, exec_forward))
7144 execution_direction = EXEC_FORWARD;
7145 else if (!strcmp (exec_direction, exec_reverse))
7146 execution_direction = EXEC_REVERSE;
7147 }
7148 else
7149 {
7150 exec_direction = exec_forward;
7151 error (_("Target does not support this operation."));
7152 }
7153 }
7154
7155 static void
7156 show_exec_direction_func (struct ui_file *out, int from_tty,
7157 struct cmd_list_element *cmd, const char *value)
7158 {
7159   switch (execution_direction)
    {
7160 case EXEC_FORWARD:
7161 fprintf_filtered (out, _("Forward.\n"));
7162 break;
7163 case EXEC_REVERSE:
7164 fprintf_filtered (out, _("Reverse.\n"));
7165 break;
7166 default:
7167 internal_error (__FILE__, __LINE__,
7168 _("bogus execution_direction value: %d"),
7169 (int) execution_direction);
7170 }
7171 }
7172
7173 static void
7174 show_schedule_multiple (struct ui_file *file, int from_tty,
7175 struct cmd_list_element *c, const char *value)
7176 {
7177 fprintf_filtered (file, _("Resuming the execution of threads "
7178 "of all processes is %s.\n"), value);
7179 }
7180
7181 /* Implementation of `siginfo' variable. */
7182
7183 static const struct internalvar_funcs siginfo_funcs =
7184 {
7185 siginfo_make_value,
7186 NULL,
7187 NULL
7188 };
7189
7190 void
7191 _initialize_infrun (void)
7192 {
7193 int i;
7194 int numsigs;
7195 struct cmd_list_element *c;
7196
7197 add_info ("signals", signals_info, _("\
7198 What debugger does when program gets various signals.\n\
7199 Specify a signal as argument to print info on that signal only."));
7200 add_info_alias ("handle", "signals", 0);
7201
7202 c = add_com ("handle", class_run, handle_command, _("\
7203 Specify how to handle signals.\n\
7204 Usage: handle SIGNAL [ACTIONS]\n\
7205 Args are signals and actions to apply to those signals.\n\
7206 If no actions are specified, the current settings for the specified signals\n\
7207 will be displayed instead.\n\
7208 \n\
7209 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7210 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7211 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7212 The special arg \"all\" is recognized to mean all signals except those\n\
7213 used by the debugger, typically SIGTRAP and SIGINT.\n\
7214 \n\
7215 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7216 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7217 Stop means reenter debugger if this signal happens (implies print).\n\
7218 Print means print a message if this signal happens.\n\
7219 Pass means let program see this signal; otherwise program doesn't know.\n\
7220 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7221 Pass and Stop may be combined.\n\
7222 \n\
7223 Multiple signals may be specified. Signal numbers and signal names\n\
7224 may be interspersed with actions, with the actions being performed for\n\
7225 all signals cumulatively specified."));
7226 set_cmd_completer (c, handle_completer);
7227
7228 if (xdb_commands)
7229 {
7230 add_com ("lz", class_info, signals_info, _("\
7231 What debugger does when program gets various signals.\n\
7232 Specify a signal as argument to print info on that signal only."));
7233 add_com ("z", class_run, xdb_handle_command, _("\
7234 Specify how to handle a signal.\n\
7235 Args are signals and actions to apply to those signals.\n\
7236 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7237 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7238 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7239 The special arg \"all\" is recognized to mean all signals except those\n\
7240 used by the debugger, typically SIGTRAP and SIGINT.\n\
7241 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7242 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7243 nopass), \"Q\" (noprint)\n\
7244 Stop means reenter debugger if this signal happens (implies print).\n\
7245 Print means print a message if this signal happens.\n\
7246 Pass means let program see this signal; otherwise program doesn't know.\n\
7247 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7248 Pass and Stop may be combined."));
7249 }
7250
7251 if (!dbx_commands)
7252 stop_command = add_cmd ("stop", class_obscure,
7253 not_just_help_class_command, _("\
7254 There is no `stop' command, but you can set a hook on `stop'.\n\
7255 This allows you to set a list of commands to be run each time execution\n\
7256 of the program stops."), &cmdlist);
7257
7258 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7259 Set inferior debugging."), _("\
7260 Show inferior debugging."), _("\
7261 When non-zero, inferior specific debugging is enabled."),
7262 NULL,
7263 show_debug_infrun,
7264 &setdebuglist, &showdebuglist);
7265
7266 add_setshow_boolean_cmd ("displaced", class_maintenance,
7267 &debug_displaced, _("\
7268 Set displaced stepping debugging."), _("\
7269 Show displaced stepping debugging."), _("\
7270 When non-zero, displaced stepping specific debugging is enabled."),
7271 NULL,
7272 show_debug_displaced,
7273 &setdebuglist, &showdebuglist);
7274
7275 add_setshow_boolean_cmd ("non-stop", no_class,
7276 &non_stop_1, _("\
7277 Set whether gdb controls the inferior in non-stop mode."), _("\
7278 Show whether gdb controls the inferior in non-stop mode."), _("\
7279 When debugging a multi-threaded program and this setting is\n\
7280 off (the default, also called all-stop mode), when one thread stops\n\
7281 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7282 all other threads in the program while you interact with the thread of\n\
7283 interest. When you continue or step a thread, you can allow the other\n\
7284 threads to run, or have them remain stopped, but while you inspect any\n\
7285 thread's state, all threads stop.\n\
7286 \n\
7287 In non-stop mode, when one thread stops, other threads can continue\n\
7288 to run freely. You'll be able to step each thread independently,\n\
7289 leave it stopped or free to run as needed."),
7290 set_non_stop,
7291 show_non_stop,
7292 &setlist,
7293 &showlist);
7294
7295 numsigs = (int) GDB_SIGNAL_LAST;
7296 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7297 signal_print = (unsigned char *)
7298 xmalloc (sizeof (signal_print[0]) * numsigs);
7299 signal_program = (unsigned char *)
7300 xmalloc (sizeof (signal_program[0]) * numsigs);
7301 signal_catch = (unsigned char *)
7302 xmalloc (sizeof (signal_catch[0]) * numsigs);
7303 signal_pass = (unsigned char *)
7304     xmalloc (sizeof (signal_pass[0]) * numsigs);
7305 for (i = 0; i < numsigs; i++)
7306 {
7307 signal_stop[i] = 1;
7308 signal_print[i] = 1;
7309 signal_program[i] = 1;
7310 signal_catch[i] = 0;
7311 }
7312
7313 /* Signals caused by debugger's own actions
7314 should not be given to the program afterwards. */
7315 signal_program[GDB_SIGNAL_TRAP] = 0;
7316 signal_program[GDB_SIGNAL_INT] = 0;
7317
7318 /* Signals that are not errors should not normally enter the debugger. */
7319 signal_stop[GDB_SIGNAL_ALRM] = 0;
7320 signal_print[GDB_SIGNAL_ALRM] = 0;
7321 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7322 signal_print[GDB_SIGNAL_VTALRM] = 0;
7323 signal_stop[GDB_SIGNAL_PROF] = 0;
7324 signal_print[GDB_SIGNAL_PROF] = 0;
7325 signal_stop[GDB_SIGNAL_CHLD] = 0;
7326 signal_print[GDB_SIGNAL_CHLD] = 0;
7327 signal_stop[GDB_SIGNAL_IO] = 0;
7328 signal_print[GDB_SIGNAL_IO] = 0;
7329 signal_stop[GDB_SIGNAL_POLL] = 0;
7330 signal_print[GDB_SIGNAL_POLL] = 0;
7331 signal_stop[GDB_SIGNAL_URG] = 0;
7332 signal_print[GDB_SIGNAL_URG] = 0;
7333 signal_stop[GDB_SIGNAL_WINCH] = 0;
7334 signal_print[GDB_SIGNAL_WINCH] = 0;
7335 signal_stop[GDB_SIGNAL_PRIO] = 0;
7336 signal_print[GDB_SIGNAL_PRIO] = 0;
7337
7338 /* These signals are used internally by user-level thread
7339 implementations. (See signal(5) on Solaris.) Like the above
7340 signals, a healthy program receives and handles them as part of
7341 its normal operation. */
7342 signal_stop[GDB_SIGNAL_LWP] = 0;
7343 signal_print[GDB_SIGNAL_LWP] = 0;
7344 signal_stop[GDB_SIGNAL_WAITING] = 0;
7345 signal_print[GDB_SIGNAL_WAITING] = 0;
7346 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7347 signal_print[GDB_SIGNAL_CANCEL] = 0;
7348
7349 /* Update cached state. */
7350 signal_cache_update (-1);
7351
7352 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7353 &stop_on_solib_events, _("\
7354 Set stopping for shared library events."), _("\
7355 Show stopping for shared library events."), _("\
7356 If nonzero, gdb will give control to the user when the dynamic linker\n\
7357 notifies gdb of shared library events. The most common event of interest\n\
7358 to the user would be loading/unloading of a new library."),
7359 set_stop_on_solib_events,
7360 show_stop_on_solib_events,
7361 &setlist, &showlist);
7362
7363 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7364 follow_fork_mode_kind_names,
7365 &follow_fork_mode_string, _("\
7366 Set debugger response to a program call of fork or vfork."), _("\
7367 Show debugger response to a program call of fork or vfork."), _("\
7368 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7369 parent - the original process is debugged after a fork\n\
7370 child - the new process is debugged after a fork\n\
7371 The unfollowed process will continue to run.\n\
7372 By default, the debugger will follow the parent process."),
7373 NULL,
7374 show_follow_fork_mode_string,
7375 &setlist, &showlist);
7376
7377 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7378 follow_exec_mode_names,
7379 &follow_exec_mode_string, _("\
7380 Set debugger response to a program call of exec."), _("\
7381 Show debugger response to a program call of exec."), _("\
7382 An exec call replaces the program image of a process.\n\
7383 \n\
7384 follow-exec-mode can be:\n\
7385 \n\
7386 new - the debugger creates a new inferior and rebinds the process\n\
7387 to this new inferior. The program the process was running before\n\
7388 the exec call can be restarted afterwards by restarting the original\n\
7389 inferior.\n\
7390 \n\
7391 same - the debugger keeps the process bound to the same inferior.\n\
7392 The new executable image replaces the previous executable loaded in\n\
7393 the inferior. Restarting the inferior after the exec call restarts\n\
7394 the executable the process was running after the exec call.\n\
7395 \n\
7396 By default, the debugger will use the same inferior."),
7397 NULL,
7398 show_follow_exec_mode_string,
7399 &setlist, &showlist);
7400
7401 add_setshow_enum_cmd ("scheduler-locking", class_run,
7402 scheduler_enums, &scheduler_mode, _("\
7403 Set mode for locking scheduler during execution."), _("\
7404 Show mode for locking scheduler during execution."), _("\
7405 off == no locking (threads may preempt at any time)\n\
7406 on == full locking (no thread except the current thread may run)\n\
7407 step == scheduler locked during every single-step operation.\n\
7408 In this mode, no other thread may run during a step command.\n\
7409 Other threads may run while stepping over a function call ('next')."),
7410 set_schedlock_func, /* traps on target vector */
7411 show_scheduler_mode,
7412 &setlist, &showlist);
7413
7414 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7415 Set mode for resuming threads of all processes."), _("\
7416 Show mode for resuming threads of all processes."), _("\
7417 When on, execution commands (such as 'continue' or 'next') resume all\n\
7418 threads of all processes. When off (which is the default), execution\n\
7419 commands only resume the threads of the current process. The set of\n\
7420 threads that are resumed is further refined by the scheduler-locking\n\
7421 mode (see help set scheduler-locking)."),
7422 NULL,
7423 show_schedule_multiple,
7424 &setlist, &showlist);
7425
7426 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7427 Set mode of the step operation."), _("\
7428 Show mode of the step operation."), _("\
7429 When set, doing a step over a function without debug line information\n\
7430 will stop at the first instruction of that function. Otherwise, the\n\
7431 function is skipped and the step command stops at a different source line."),
7432 NULL,
7433 show_step_stop_if_no_debug,
7434 &setlist, &showlist);
7435
7436 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7437 &can_use_displaced_stepping, _("\
7438 Set debugger's willingness to use displaced stepping."), _("\
7439 Show debugger's willingness to use displaced stepping."), _("\
7440 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7441 supported by the target architecture. If off, gdb will not use displaced\n\
7442 stepping to step over breakpoints, even if such is supported by the target\n\
7443 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7444 if the target architecture supports it and non-stop mode is active, but will not\n\
7445 use it in all-stop mode (see help set non-stop)."),
7446 NULL,
7447 show_can_use_displaced_stepping,
7448 &setlist, &showlist);
7449
7450 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7451 &exec_direction, _("Set direction of execution.\n\
7452 Options are 'forward' or 'reverse'."),
7453 _("Show direction of execution (forward/reverse)."),
7454 _("Tells gdb whether to execute forward or backward."),
7455 set_exec_direction_func, show_exec_direction_func,
7456 &setlist, &showlist);
7457
7458 /* Set/show detach-on-fork: user-settable mode. */
7459
7460 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7461 Set whether gdb will detach the child of a fork."), _("\
7462 Show whether gdb will detach the child of a fork."), _("\
7463 Tells gdb whether to detach the child of a fork."),
7464 NULL, NULL, &setlist, &showlist);
7465
7466 /* Set/show disable address space randomization mode. */
7467
7468 add_setshow_boolean_cmd ("disable-randomization", class_support,
7469 &disable_randomization, _("\
7470 Set disabling of debuggee's virtual address space randomization."), _("\
7471 Show disabling of debuggee's virtual address space randomization."), _("\
7472 When this mode is on (which is the default), randomization of the virtual\n\
7473 address space is disabled. Standalone programs run with the randomization\n\
7474 enabled by default on some platforms."),
7475 &set_disable_randomization,
7476 &show_disable_randomization,
7477 &setlist, &showlist);
7478
7479 /* ptid initializations */
7480 inferior_ptid = null_ptid;
7481 target_last_wait_ptid = minus_one_ptid;
7482
7483 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7484 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7485 observer_attach_thread_exit (infrun_thread_thread_exit);
7486 observer_attach_inferior_exit (infrun_inferior_exit);
7487
7488 /* Explicitly create without lookup, since that tries to create a
7489 value with a void typed value, and when we get here, gdbarch
7490 isn't initialized yet. At this point, we're quite sure there
7491 isn't another convenience variable of the same name. */
7492 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7493
7494 add_setshow_boolean_cmd ("observer", no_class,
7495 &observer_mode_1, _("\
7496 Set whether gdb controls the inferior in observer mode."), _("\
7497 Show whether gdb controls the inferior in observer mode."), _("\
7498 In observer mode, GDB can get data from the inferior, but not\n\
7499 affect its execution. Registers and memory may not be changed,\n\
7500 breakpoints may not be set, and the program cannot be interrupted\n\
7501 or signalled."),
7502 set_observer_mode,
7503 show_observer_mode,
7504 &setlist,
7505 &showlist);
7506 }