1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2014 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <string.h>
24 #include <ctype.h>
25 #include "symtab.h"
26 #include "frame.h"
27 #include "inferior.h"
28 #include "exceptions.h"
29 #include "breakpoint.h"
30 #include "gdb_wait.h"
31 #include "gdbcore.h"
32 #include "gdbcmd.h"
33 #include "cli/cli-script.h"
34 #include "target.h"
35 #include "gdbthread.h"
36 #include "annotate.h"
37 #include "symfile.h"
38 #include "top.h"
39 #include <signal.h>
40 #include "inf-loop.h"
41 #include "regcache.h"
42 #include "value.h"
43 #include "observer.h"
44 #include "language.h"
45 #include "solib.h"
46 #include "main.h"
47 #include "dictionary.h"
48 #include "block.h"
49 #include "gdb_assert.h"
50 #include "mi/mi-common.h"
51 #include "event-top.h"
52 #include "record.h"
53 #include "record-full.h"
54 #include "inline-frame.h"
55 #include "jit.h"
56 #include "tracepoint.h"
57 #include "continuations.h"
58 #include "interps.h"
59 #include "skip.h"
60 #include "probe.h"
61 #include "objfiles.h"
62 #include "completer.h"
63 #include "target-descriptions.h"
64 #include "target-dcache.h"
65
66 /* Prototypes for local functions */
67
68 static void signals_info (char *, int);
69
70 static void handle_command (char *, int);
71
72 static void sig_print_info (enum gdb_signal);
73
74 static void sig_print_header (void);
75
76 static void resume_cleanups (void *);
77
78 static int hook_stop_stub (void *);
79
80 static int restore_selected_frame (void *);
81
82 static int follow_fork (void);
83
84 static void set_schedlock_func (char *args, int from_tty,
85 struct cmd_list_element *c);
86
87 static int currently_stepping (struct thread_info *tp);
88
89 static void xdb_handle_command (char *args, int from_tty);
90
91 static void end_stepping_range (void);
92
93 void _initialize_infrun (void);
94
95 void nullify_last_target_wait_ptid (void);
96
97 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
98
99 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
100
101 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
102
103 /* When set, stop the 'step' command if we enter a function which has
104 no line number information. The normal behavior is that we step
105 over such functions. */
106 int step_stop_if_no_debug = 0;
107 static void
108 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
109 struct cmd_list_element *c, const char *value)
110 {
111 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
112 }
113
114 /* In asynchronous mode, but simulating synchronous execution. */
115
116 int sync_execution = 0;
117
118 /* proceed and normal_stop use this to notify the user when the
119 inferior stopped in a different thread than it had been running
120 in. */
121
122 static ptid_t previous_inferior_ptid;
123
124 /* If set (default for legacy reasons), when following a fork, GDB
125 will detach from one of the fork branches, child or parent.
126 Exactly which branch is detached depends on the 'set follow-fork-mode'
127 setting. */
128
129 static int detach_fork = 1;
130
131 int debug_displaced = 0;
132 static void
133 show_debug_displaced (struct ui_file *file, int from_tty,
134 struct cmd_list_element *c, const char *value)
135 {
136 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
137 }
138
139 unsigned int debug_infrun = 0;
140 static void
141 show_debug_infrun (struct ui_file *file, int from_tty,
142 struct cmd_list_element *c, const char *value)
143 {
144 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
145 }
146
147
148 /* Support for disabling address space randomization. */
149
150 int disable_randomization = 1;
151
152 static void
153 show_disable_randomization (struct ui_file *file, int from_tty,
154 struct cmd_list_element *c, const char *value)
155 {
156 if (target_supports_disable_randomization ())
157 fprintf_filtered (file,
158 _("Disabling randomization of debuggee's "
159 "virtual address space is %s.\n"),
160 value);
161 else
162 fputs_filtered (_("Disabling randomization of debuggee's "
163 "virtual address space is unsupported on\n"
164 "this platform.\n"), file);
165 }
166
167 static void
168 set_disable_randomization (char *args, int from_tty,
169 struct cmd_list_element *c)
170 {
171 if (!target_supports_disable_randomization ())
172 error (_("Disabling randomization of debuggee's "
173 "virtual address space is unsupported on\n"
174 "this platform."));
175 }
176
177 /* User interface for non-stop mode. */
178
179 int non_stop = 0;
180 static int non_stop_1 = 0;
181
182 static void
183 set_non_stop (char *args, int from_tty,
184 struct cmd_list_element *c)
185 {
186 if (target_has_execution)
187 {
188 non_stop_1 = non_stop;
189 error (_("Cannot change this setting while the inferior is running."));
190 }
191
192 non_stop = non_stop_1;
193 }
194
195 static void
196 show_non_stop (struct ui_file *file, int from_tty,
197 struct cmd_list_element *c, const char *value)
198 {
199 fprintf_filtered (file,
200 _("Controlling the inferior in non-stop mode is %s.\n"),
201 value);
202 }
203
204 /* "Observer mode" is somewhat like a more extreme version of
205 non-stop, in which all GDB operations that might affect the
206 target's execution have been disabled. */
207
208 int observer_mode = 0;
209 static int observer_mode_1 = 0;
210
211 static void
212 set_observer_mode (char *args, int from_tty,
213 struct cmd_list_element *c)
214 {
215 if (target_has_execution)
216 {
217 observer_mode_1 = observer_mode;
218 error (_("Cannot change this setting while the inferior is running."));
219 }
220
221 observer_mode = observer_mode_1;
222
223 may_write_registers = !observer_mode;
224 may_write_memory = !observer_mode;
225 may_insert_breakpoints = !observer_mode;
226 may_insert_tracepoints = !observer_mode;
227 /* We can insert fast tracepoints in or out of observer mode,
228 but enable them if we're going into this mode. */
229 if (observer_mode)
230 may_insert_fast_tracepoints = 1;
231 may_stop = !observer_mode;
232 update_target_permissions ();
233
234 /* Going *into* observer mode we must force non-stop, then
235 going out we leave it that way. */
236 if (observer_mode)
237 {
238 pagination_enabled = 0;
239 non_stop = non_stop_1 = 1;
240 }
241
242 if (from_tty)
243 printf_filtered (_("Observer mode is now %s.\n"),
244 (observer_mode ? "on" : "off"));
245 }
246
247 static void
248 show_observer_mode (struct ui_file *file, int from_tty,
249 struct cmd_list_element *c, const char *value)
250 {
251 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
252 }
253
254 /* This updates the value of observer mode based on changes in
255 permissions. Note that we are deliberately ignoring the values of
256 may-write-registers and may-write-memory, since the user may have
257 reason to enable these during a session, for instance to turn on a
258 debugging-related global. */
259
260 void
261 update_observer_mode (void)
262 {
263 int newval;
264
265 newval = (!may_insert_breakpoints
266 && !may_insert_tracepoints
267 && may_insert_fast_tracepoints
268 && !may_stop
269 && non_stop);
270
271 /* Let the user know if things change. */
272 if (newval != observer_mode)
273 printf_filtered (_("Observer mode is now %s.\n"),
274 (newval ? "on" : "off"));
275
276 observer_mode = observer_mode_1 = newval;
277 }
278
279 /* Tables of how to react to signals; the user sets them. */
280
281 static unsigned char *signal_stop;
282 static unsigned char *signal_print;
283 static unsigned char *signal_program;
284
285 /* Table of signals that are registered with "catch signal". A
286 non-zero entry indicates that the signal is caught by some "catch
287 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
288 signals. */
289 static unsigned char *signal_catch;
290
291 /* Table of signals that the target may silently handle.
292 This is automatically determined from the flags above,
293 and simply cached here. */
294 static unsigned char *signal_pass;
295
296 #define SET_SIGS(nsigs,sigs,flags) \
297 do { \
298 int signum = (nsigs); \
299 while (signum-- > 0) \
300 if ((sigs)[signum]) \
301 (flags)[signum] = 1; \
302 } while (0)
303
304 #define UNSET_SIGS(nsigs,sigs,flags) \
305 do { \
306 int signum = (nsigs); \
307 while (signum-- > 0) \
308 if ((sigs)[signum]) \
309 (flags)[signum] = 0; \
310 } while (0)
311
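/* A minimal usage sketch for the two macros above (illustrative only;
   the helper below is hypothetical and is not the actual `handle'
   command implementation).  SIGS is a scratch array with a non-zero
   entry for each signal the user named.  */
#if 0
static void
example_mark_signals_stop (const unsigned char *sigs)
{
  int nsigs = (int) GDB_SIGNAL_LAST;

  /* Make the named signals stop and be announced, and do not pass
     them to the program.  */
  SET_SIGS (nsigs, sigs, signal_stop);
  SET_SIGS (nsigs, sigs, signal_print);
  UNSET_SIGS (nsigs, sigs, signal_program);

  /* Keep the target's copy of signal_program in sync.  */
  update_signals_program_target ();
}
#endif
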
312 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
313 this function is to avoid exporting `signal_program'. */
314
315 void
316 update_signals_program_target (void)
317 {
318 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
319 }
320
321 /* Value to pass to target_resume() to cause all threads to resume. */
322
323 #define RESUME_ALL minus_one_ptid
324
325 /* Command list pointer for the "stop" placeholder. */
326
327 static struct cmd_list_element *stop_command;
328
329 /* Function inferior was in as of last step command. */
330
331 static struct symbol *step_start_function;
332
333 /* Nonzero if we want to give control to the user when we're notified
334 of shared library events by the dynamic linker. */
335 int stop_on_solib_events;
336
337 /* Enable or disable optional shared library event breakpoints
338 as appropriate when the above flag is changed. */
339
340 static void
341 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
342 {
343 update_solib_breakpoints ();
344 }
345
346 static void
347 show_stop_on_solib_events (struct ui_file *file, int from_tty,
348 struct cmd_list_element *c, const char *value)
349 {
350 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
351 value);
352 }
353
354 /* Nonzero means expecting a trace trap
355 and should stop the inferior and return silently when it happens. */
356
357 int stop_after_trap;
358
359 /* Save register contents here when executing a "finish" command or when
360 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
361 Thus this contains the return value from the called function (assuming
362 values are returned in a register). */
363
364 struct regcache *stop_registers;
365
366 /* Nonzero after stop if current stack frame should be printed. */
367
368 static int stop_print_frame;
369
370 /* This is a cached copy of the pid/waitstatus of the last event
371 returned by target_wait()/deprecated_target_wait_hook(). This
372 information is returned by get_last_target_status(). */
373 static ptid_t target_last_wait_ptid;
374 static struct target_waitstatus target_last_waitstatus;
375
376 static void context_switch (ptid_t ptid);
377
378 void init_thread_stepping_state (struct thread_info *tss);
379
380 static void init_infwait_state (void);
381
382 static const char follow_fork_mode_child[] = "child";
383 static const char follow_fork_mode_parent[] = "parent";
384
385 static const char *const follow_fork_mode_kind_names[] = {
386 follow_fork_mode_child,
387 follow_fork_mode_parent,
388 NULL
389 };
390
391 static const char *follow_fork_mode_string = follow_fork_mode_parent;
392 static void
393 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
394 struct cmd_list_element *c, const char *value)
395 {
396 fprintf_filtered (file,
397 _("Debugger response to a program "
398 "call of fork or vfork is \"%s\".\n"),
399 value);
400 }
401 \f
402
403 /* Tell the target to follow the fork we're stopped at. Returns true
404 if the inferior should be resumed; false, if the target for some
405 reason decided it's best not to resume. */
406
407 static int
408 follow_fork (void)
409 {
410 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
411 int should_resume = 1;
412 struct thread_info *tp;
413
414 /* Copy user stepping state to the new inferior thread. FIXME: the
415 followed fork child thread should have a copy of most of the
416 parent thread structure's run control related fields, not just these.
417 Initialized to avoid "may be used uninitialized" warnings from gcc. */
418 struct breakpoint *step_resume_breakpoint = NULL;
419 struct breakpoint *exception_resume_breakpoint = NULL;
420 CORE_ADDR step_range_start = 0;
421 CORE_ADDR step_range_end = 0;
422 struct frame_id step_frame_id = { 0 };
423 struct interp *command_interp = NULL;
424
425 if (!non_stop)
426 {
427 ptid_t wait_ptid;
428 struct target_waitstatus wait_status;
429
430 /* Get the last target status returned by target_wait(). */
431 get_last_target_status (&wait_ptid, &wait_status);
432
433 /* If not stopped at a fork event, then there's nothing else to
434 do. */
435 if (wait_status.kind != TARGET_WAITKIND_FORKED
436 && wait_status.kind != TARGET_WAITKIND_VFORKED)
437 return 1;
438
439 /* Check if we switched over from WAIT_PTID since the event was
440 reported. */
441 if (!ptid_equal (wait_ptid, minus_one_ptid)
442 && !ptid_equal (inferior_ptid, wait_ptid))
443 {
444 /* We did. Switch back to WAIT_PTID thread, to tell the
445 target to follow it (in either direction). We'll
446 afterwards refuse to resume, and inform the user what
447 happened. */
448 switch_to_thread (wait_ptid);
449 should_resume = 0;
450 }
451 }
452
453 tp = inferior_thread ();
454
455 /* If there were any forks/vforks that were caught and are now to be
456 followed, then do so now. */
457 switch (tp->pending_follow.kind)
458 {
459 case TARGET_WAITKIND_FORKED:
460 case TARGET_WAITKIND_VFORKED:
461 {
462 ptid_t parent, child;
463
464 /* If the user did a next/step, etc, over a fork call,
465 preserve the stepping state in the fork child. */
466 if (follow_child && should_resume)
467 {
468 step_resume_breakpoint = clone_momentary_breakpoint
469 (tp->control.step_resume_breakpoint);
470 step_range_start = tp->control.step_range_start;
471 step_range_end = tp->control.step_range_end;
472 step_frame_id = tp->control.step_frame_id;
473 exception_resume_breakpoint
474 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
475 command_interp = tp->control.command_interp;
476
477 /* For now, delete the parent's sr breakpoint, otherwise,
478 parent/child sr breakpoints are considered duplicates,
479 and the child version will not be installed. Remove
480 this when the breakpoints module becomes aware of
481 inferiors and address spaces. */
482 delete_step_resume_breakpoint (tp);
483 tp->control.step_range_start = 0;
484 tp->control.step_range_end = 0;
485 tp->control.step_frame_id = null_frame_id;
486 delete_exception_resume_breakpoint (tp);
487 tp->control.command_interp = NULL;
488 }
489
490 parent = inferior_ptid;
491 child = tp->pending_follow.value.related_pid;
492
493 /* Tell the target to do whatever is necessary to follow
494 either parent or child. */
495 if (target_follow_fork (follow_child, detach_fork))
496 {
497 /* Target refused to follow, or there's some other reason
498 we shouldn't resume. */
499 should_resume = 0;
500 }
501 else
502 {
503 /* This pending follow fork event is now handled, one way
504 or another. The previously selected thread may be gone
505 from the lists by now, but if it is still around, we need
506 to clear the pending follow request. */
507 tp = find_thread_ptid (parent);
508 if (tp)
509 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
510
511 /* This makes sure we don't try to apply the "Switched
512 over from WAIT_PTID" logic above. */
513 nullify_last_target_wait_ptid ();
514
515 /* If we followed the child, switch to it... */
516 if (follow_child)
517 {
518 switch_to_thread (child);
519
520 /* ... and preserve the stepping state, in case the
521 user was stepping over the fork call. */
522 if (should_resume)
523 {
524 tp = inferior_thread ();
525 tp->control.step_resume_breakpoint
526 = step_resume_breakpoint;
527 tp->control.step_range_start = step_range_start;
528 tp->control.step_range_end = step_range_end;
529 tp->control.step_frame_id = step_frame_id;
530 tp->control.exception_resume_breakpoint
531 = exception_resume_breakpoint;
532 tp->control.command_interp = command_interp;
533 }
534 else
535 {
536 /* If we get here, it was because we're trying to
537 resume from a fork catchpoint, but, the user
538 has switched threads away from the thread that
539 forked. In that case, the resume command
540 issued is most likely not applicable to the
541 child, so just warn, and refuse to resume. */
542 warning (_("Not resuming: switched threads "
543 "before following fork child.\n"));
544 }
545
546 /* Reset breakpoints in the child as appropriate. */
547 follow_inferior_reset_breakpoints ();
548 }
549 else
550 switch_to_thread (parent);
551 }
552 }
553 break;
554 case TARGET_WAITKIND_SPURIOUS:
555 /* Nothing to follow. */
556 break;
557 default:
558 internal_error (__FILE__, __LINE__,
559 "Unexpected pending_follow.kind %d\n",
560 tp->pending_follow.kind);
561 break;
562 }
563
564 return should_resume;
565 }
566
567 void
568 follow_inferior_reset_breakpoints (void)
569 {
570 struct thread_info *tp = inferior_thread ();
571
572 /* Was there a step_resume breakpoint? (There was if the user
573 did a "next" at the fork() call.) If so, explicitly reset its
574 thread number.
575
576 step_resumes are a form of bp that are made to be per-thread.
577 Since we created the step_resume bp when the parent process
578 was being debugged, and now are switching to the child process,
579 from the breakpoint package's viewpoint, that's a switch of
580 "threads". We must update the bp's notion of which thread
581 it is for, or it'll be ignored when it triggers. */
582
583 if (tp->control.step_resume_breakpoint)
584 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
585
586 if (tp->control.exception_resume_breakpoint)
587 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
588
589 /* Reinsert all breakpoints in the child. The user may have set
590 breakpoints after catching the fork, in which case those
591 were never set in the child, but only in the parent. This makes
592 sure the inserted breakpoints match the breakpoint list. */
593
594 breakpoint_re_set ();
595 insert_breakpoints ();
596 }
597
598 /* The child has exited or execed: resume the threads of the parent
599 that the user wanted to be executing. */
600
601 static int
602 proceed_after_vfork_done (struct thread_info *thread,
603 void *arg)
604 {
605 int pid = * (int *) arg;
606
607 if (ptid_get_pid (thread->ptid) == pid
608 && is_running (thread->ptid)
609 && !is_executing (thread->ptid)
610 && !thread->stop_requested
611 && thread->suspend.stop_signal == GDB_SIGNAL_0)
612 {
613 if (debug_infrun)
614 fprintf_unfiltered (gdb_stdlog,
615 "infrun: resuming vfork parent thread %s\n",
616 target_pid_to_str (thread->ptid));
617
618 switch_to_thread (thread->ptid);
619 clear_proceed_status ();
620 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
621 }
622
623 return 0;
624 }
625
626 /* Called whenever we notice an exec or exit event, to handle
627 detaching or resuming a vfork parent. */
628
629 static void
630 handle_vfork_child_exec_or_exit (int exec)
631 {
632 struct inferior *inf = current_inferior ();
633
634 if (inf->vfork_parent)
635 {
636 int resume_parent = -1;
637
638 /* This exec or exit marks the end of the shared memory region
639 between the parent and the child. If the user wanted to
640 detach from the parent, now is the time. */
641
642 if (inf->vfork_parent->pending_detach)
643 {
644 struct thread_info *tp;
645 struct cleanup *old_chain;
646 struct program_space *pspace;
647 struct address_space *aspace;
648
649 /* follow-fork child, detach-on-fork on. */
650
651 inf->vfork_parent->pending_detach = 0;
652
653 if (!exec)
654 {
655 /* If we're handling a child exit, then inferior_ptid
656 points at the inferior's pid, not to a thread. */
657 old_chain = save_inferior_ptid ();
658 save_current_program_space ();
659 save_current_inferior ();
660 }
661 else
662 old_chain = save_current_space_and_thread ();
663
664 /* We're letting go of the parent. */
665 tp = any_live_thread_of_process (inf->vfork_parent->pid);
666 switch_to_thread (tp->ptid);
667
668 /* We're about to detach from the parent, which implicitly
669 removes breakpoints from its address space. There's a
670 catch here: we want to reuse the spaces for the child,
671 but, parent/child are still sharing the pspace at this
672 point, although the exec in reality makes the kernel give
673 the child a fresh set of new pages. The problem here is
674 that the breakpoints module, being unaware of this, would
675 likely choose the child process to write to the parent
676 address space. Swapping the child temporarily away from
677 the spaces has the desired effect. Yes, this is "sort
678 of" a hack. */
679
680 pspace = inf->pspace;
681 aspace = inf->aspace;
682 inf->aspace = NULL;
683 inf->pspace = NULL;
684
685 if (debug_infrun || info_verbose)
686 {
687 target_terminal_ours ();
688
689 if (exec)
690 fprintf_filtered (gdb_stdlog,
691 "Detaching vfork parent process "
692 "%d after child exec.\n",
693 inf->vfork_parent->pid);
694 else
695 fprintf_filtered (gdb_stdlog,
696 "Detaching vfork parent process "
697 "%d after child exit.\n",
698 inf->vfork_parent->pid);
699 }
700
701 target_detach (NULL, 0);
702
703 /* Put it back. */
704 inf->pspace = pspace;
705 inf->aspace = aspace;
706
707 do_cleanups (old_chain);
708 }
709 else if (exec)
710 {
711 /* We're staying attached to the parent, so, really give the
712 child a new address space. */
713 inf->pspace = add_program_space (maybe_new_address_space ());
714 inf->aspace = inf->pspace->aspace;
715 inf->removable = 1;
716 set_current_program_space (inf->pspace);
717
718 resume_parent = inf->vfork_parent->pid;
719
720 /* Break the bonds. */
721 inf->vfork_parent->vfork_child = NULL;
722 }
723 else
724 {
725 struct cleanup *old_chain;
726 struct program_space *pspace;
727
728 /* If this is a vfork child exiting, then the pspace and
729 aspaces were shared with the parent. Since we're
730 reporting the process exit, we'll be mourning all that is
731 found in the address space, and switching to null_ptid,
732 preparing to start a new inferior. But, since we don't
733 want to clobber the parent's address/program spaces, we
734 go ahead and create a new one for this exiting
735 inferior. */
736
737 /* Switch to null_ptid, so that clone_program_space doesn't want
738 to read the selected frame of a dead process. */
739 old_chain = save_inferior_ptid ();
740 inferior_ptid = null_ptid;
741
742 /* This inferior is dead, so avoid giving the breakpoints
743 module the option to write through to it (cloning a
744 program space resets breakpoints). */
745 inf->aspace = NULL;
746 inf->pspace = NULL;
747 pspace = add_program_space (maybe_new_address_space ());
748 set_current_program_space (pspace);
749 inf->removable = 1;
750 inf->symfile_flags = SYMFILE_NO_READ;
751 clone_program_space (pspace, inf->vfork_parent->pspace);
752 inf->pspace = pspace;
753 inf->aspace = pspace->aspace;
754
755 /* Put back inferior_ptid. We'll continue mourning this
756 inferior. */
757 do_cleanups (old_chain);
758
759 resume_parent = inf->vfork_parent->pid;
760 /* Break the bonds. */
761 inf->vfork_parent->vfork_child = NULL;
762 }
763
764 inf->vfork_parent = NULL;
765
766 gdb_assert (current_program_space == inf->pspace);
767
768 if (non_stop && resume_parent != -1)
769 {
770 /* If the user wanted the parent to be running, let it go
771 free now. */
772 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
773
774 if (debug_infrun)
775 fprintf_unfiltered (gdb_stdlog,
776 "infrun: resuming vfork parent process %d\n",
777 resume_parent);
778
779 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
780
781 do_cleanups (old_chain);
782 }
783 }
784 }
785
786 /* Enum strings for "set|show follow-exec-mode". */
787
788 static const char follow_exec_mode_new[] = "new";
789 static const char follow_exec_mode_same[] = "same";
790 static const char *const follow_exec_mode_names[] =
791 {
792 follow_exec_mode_new,
793 follow_exec_mode_same,
794 NULL,
795 };
796
797 static const char *follow_exec_mode_string = follow_exec_mode_same;
798 static void
799 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
800 struct cmd_list_element *c, const char *value)
801 {
802 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
803 }
804
805 /* EXECD_PATHNAME is assumed to be non-NULL. */
806
807 static void
808 follow_exec (ptid_t pid, char *execd_pathname)
809 {
810 struct thread_info *th = inferior_thread ();
811 struct inferior *inf = current_inferior ();
812
813 /* This is an exec event that we actually wish to pay attention to.
814 Refresh our symbol table to the newly exec'd program, remove any
815 momentary bp's, etc.
816
817 If there are breakpoints, they aren't really inserted now,
818 since the exec() transformed our inferior into a fresh set
819 of instructions.
820
821 We want to preserve symbolic breakpoints on the list, since
822 we have hopes that they can be reset after the new a.out's
823 symbol table is read.
824
825 However, any "raw" breakpoints must be removed from the list
826 (e.g., the solib bp's), since their address is probably invalid
827 now.
828
829 And, we DON'T want to call delete_breakpoints() here, since
830 that may write the bp's "shadow contents" (the instruction
831 value that was overwritten with a TRAP instruction). Since
832 we now have a new a.out, those shadow contents aren't valid. */
833
834 mark_breakpoints_out ();
835
836 update_breakpoints_after_exec ();
837
838 /* If there was one, it's gone now. We cannot truly step-to-next
839 statement through an exec(). */
840 th->control.step_resume_breakpoint = NULL;
841 th->control.exception_resume_breakpoint = NULL;
842 th->control.step_range_start = 0;
843 th->control.step_range_end = 0;
844
845 /* The target reports the exec event to the main thread, even if
846 some other thread does the exec, and even if the main thread was
847 already stopped --- if debugging in non-stop mode, it's possible
848 the user had the main thread held stopped in the previous image
849 --- release it now. This is the same behavior as step-over-exec
850 with scheduler-locking on in all-stop mode. */
851 th->stop_requested = 0;
852
853 /* What is this a.out's name? */
854 printf_unfiltered (_("%s is executing new program: %s\n"),
855 target_pid_to_str (inferior_ptid),
856 execd_pathname);
857
858 /* We've followed the inferior through an exec. Therefore, the
859 inferior has essentially been killed & reborn. */
860
861 gdb_flush (gdb_stdout);
862
863 breakpoint_init_inferior (inf_execd);
864
865 if (gdb_sysroot && *gdb_sysroot)
866 {
867 char *name = alloca (strlen (gdb_sysroot)
868 + strlen (execd_pathname)
869 + 1);
870
871 strcpy (name, gdb_sysroot);
872 strcat (name, execd_pathname);
873 execd_pathname = name;
874 }
875
876 /* Reset the shared library package. This ensures that we get a
877 shlib event when the child reaches "_start", at which point the
878 dld will have had a chance to initialize the child. */
879 /* Also, loading a symbol file below may trigger symbol lookups, and
880 we don't want those to be satisfied by the libraries of the
881 previous incarnation of this process. */
882 no_shared_libraries (NULL, 0);
883
884 if (follow_exec_mode_string == follow_exec_mode_new)
885 {
886 struct program_space *pspace;
887
888 /* The user wants to keep the old inferior and program spaces
889 around. Create a new fresh one, and switch to it. */
890
891 inf = add_inferior (current_inferior ()->pid);
892 pspace = add_program_space (maybe_new_address_space ());
893 inf->pspace = pspace;
894 inf->aspace = pspace->aspace;
895
896 exit_inferior_num_silent (current_inferior ()->num);
897
898 set_current_inferior (inf);
899 set_current_program_space (pspace);
900 }
901 else
902 {
903 /* The old description may no longer be fit for the new image.
904 E.g., a 64-bit process exec'ed a 32-bit process. Clear the
905 old description; we'll read a new one below. No need to do
906 this on "follow-exec-mode new", as the old inferior stays
907 around (its description is later cleared/refetched on
908 restart). */
909 target_clear_description ();
910 }
911
912 gdb_assert (current_program_space == inf->pspace);
913
914 /* That a.out is now the one to use. */
915 exec_file_attach (execd_pathname, 0);
916
917 /* SYMFILE_DEFER_BP_RESET is used so that the proper displacement for a PIE
918 (Position Independent Executable) main symbol file will get applied by
919 solib_create_inferior_hook below. breakpoint_re_set would fail to insert
920 the breakpoints with the zero displacement. */
921
922 symbol_file_add (execd_pathname,
923 (inf->symfile_flags
924 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
925 NULL, 0);
926
927 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
928 set_initial_language ();
929
930 /* If the target can specify a description, read it. Must do this
931 after flipping to the new executable (because the target supplied
932 description must be compatible with the executable's
933 architecture, and the old executable may e.g., be 32-bit, while
934 the new one 64-bit), and before anything involving memory or
935 registers. */
936 target_find_description ();
937
938 solib_create_inferior_hook (0);
939
940 jit_inferior_created_hook ();
941
942 breakpoint_re_set ();
943
944 /* Reinsert all breakpoints. (Those which were symbolic have
945 been reset to the proper address in the new a.out, thanks
946 to symbol_file_command...). */
947 insert_breakpoints ();
948
949 /* The next resume of this inferior should bring it to the shlib
950 startup breakpoints. (If the user had also set bp's on
951 "main" from the old (parent) process, then they'll auto-
952 matically get reset there in the new process.). */
953 }
954
955 /* Non-zero if we are just simulating a single-step. This is needed
956 because we cannot remove the breakpoints in the inferior process
957 until after the `wait' in `wait_for_inferior'. */
958 static int singlestep_breakpoints_inserted_p = 0;
959
960 /* The thread we inserted single-step breakpoints for. */
961 static ptid_t singlestep_ptid;
962
963 /* PC when we started this single-step. */
964 static CORE_ADDR singlestep_pc;
965
966 /* Info about an instruction that is being stepped over. Invalid if
967 ASPACE is NULL. */
968
969 struct step_over_info
970 {
971 /* The instruction's address space. */
972 struct address_space *aspace;
973
974 /* The instruction's address. */
975 CORE_ADDR address;
976 };
977
978 /* The step-over info of the location that is being stepped over.
979
980 Note that with async/breakpoint always-inserted mode, a user might
981 set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
982 being stepped over. As setting a new breakpoint inserts all
983 breakpoints, we need to make sure the breakpoint being stepped over
984 isn't inserted then. We do that by only clearing the step-over
985 info when the step-over is actually finished (or aborted).
986
987 Presently GDB can only step over one breakpoint at any given time.
988 Given that threads that can't run code in the same address space as the
989 breakpoint can't really miss the breakpoint, GDB could be taught
990 to step-over at most one breakpoint per address space (so this info
991 could move to the address space object if/when GDB is extended).
992 The set of breakpoints being stepped over will normally be much
993 smaller than the set of all breakpoints, so a flag in the
994 breakpoint location structure would be wasteful. A separate list
995 also saves complexity and run-time, as otherwise we'd have to go
996 through all breakpoint locations clearing their flag whenever we
997 start a new sequence. Similar considerations weigh against storing
998 this info in the thread object. Plus, not all step overs actually
999 have breakpoint locations -- e.g., stepping past a single-step
1000 breakpoint, or stepping to complete a non-continuable
1001 watchpoint. */
1002 static struct step_over_info step_over_info;
1003
1004 /* Record the address of the breakpoint/instruction we're currently
1005 stepping over. */
1006
1007 static void
1008 set_step_over_info (struct address_space *aspace, CORE_ADDR address)
1009 {
1010 step_over_info.aspace = aspace;
1011 step_over_info.address = address;
1012 }
1013
1014 /* Called when we're no longer stepping over a breakpoint / an
1015 instruction, so all breakpoints are free to be (re)inserted. */
1016
1017 static void
1018 clear_step_over_info (void)
1019 {
1020 step_over_info.aspace = NULL;
1021 step_over_info.address = 0;
1022 }
1023
1024 /* See inferior.h. */
1025
1026 int
1027 stepping_past_instruction_at (struct address_space *aspace,
1028 CORE_ADDR address)
1029 {
1030 return (step_over_info.aspace != NULL
1031 && breakpoint_address_match (aspace, address,
1032 step_over_info.aspace,
1033 step_over_info.address));
1034 }
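/* A hedged sketch of how a breakpoint-insertion path might consult
   the predicate above; the helper name is hypothetical (in GDB the
   real check lives in the breakpoint module).  */
#if 0
static int
example_may_insert_location (struct address_space *aspace, CORE_ADDR addr)
{
  /* While some thread is being stepped past the instruction at ADDR,
     re-inserting a breakpoint there would defeat the step-over.  */
  if (stepping_past_instruction_at (aspace, addr))
    return 0;

  return 1;
}
#endif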
1035
1036 \f
1037 /* Displaced stepping. */
1038
1039 /* In non-stop debugging mode, we must take special care to manage
1040 breakpoints properly; in particular, the traditional strategy for
1041 stepping a thread past a breakpoint it has hit is unsuitable.
1042 'Displaced stepping' is a tactic for stepping one thread past a
1043 breakpoint it has hit while ensuring that other threads running
1044 concurrently will hit the breakpoint as they should.
1045
1046 The traditional way to step a thread T off a breakpoint in a
1047 multi-threaded program in all-stop mode is as follows:
1048
1049 a0) Initially, all threads are stopped, and breakpoints are not
1050 inserted.
1051 a1) We single-step T, leaving breakpoints uninserted.
1052 a2) We insert breakpoints, and resume all threads.
1053
1054 In non-stop debugging, however, this strategy is unsuitable: we
1055 don't want to have to stop all threads in the system in order to
1056 continue or step T past a breakpoint. Instead, we use displaced
1057 stepping:
1058
1059 n0) Initially, T is stopped, other threads are running, and
1060 breakpoints are inserted.
1061 n1) We copy the instruction "under" the breakpoint to a separate
1062 location, outside the main code stream, making any adjustments
1063 to the instruction, register, and memory state as directed by
1064 T's architecture.
1065 n2) We single-step T over the instruction at its new location.
1066 n3) We adjust the resulting register and memory state as directed
1067 by T's architecture. This includes resetting T's PC to point
1068 back into the main instruction stream.
1069 n4) We resume T.
1070
1071 This approach depends on the following gdbarch methods:
1072
1073 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1074 indicate where to copy the instruction, and how much space must
1075 be reserved there. We use these in step n1.
1076
1077 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1078 address, and makes any necessary adjustments to the instruction,
1079 register contents, and memory. We use this in step n1.
1080
1081 - gdbarch_displaced_step_fixup adjusts registers and memory after
1082 we have successfully single-stepped the instruction, to yield the
1083 same effect the instruction would have had if we had executed it
1084 at its original address. We use this in step n3.
1085
1086 - gdbarch_displaced_step_free_closure provides cleanup.
1087
1088 The gdbarch_displaced_step_copy_insn and
1089 gdbarch_displaced_step_fixup functions must be written so that
1090 copying an instruction with gdbarch_displaced_step_copy_insn,
1091 single-stepping across the copied instruction, and then applying
1092 gdbarch_displaced_step_fixup should have the same effects on the
1093 thread's memory and registers as stepping the instruction in place
1094 would have. Exactly which responsibilities fall to the copy and
1095 which fall to the fixup is up to the author of those functions.
1096
1097 See the comments in gdbarch.sh for details.
1098
1099 Note that displaced stepping and software single-step cannot
1100 currently be used in combination, although with some care I think
1101 they could be made to. Software single-step works by placing
1102 breakpoints on all possible subsequent instructions; if the
1103 displaced instruction is a PC-relative jump, those breakpoints
1104 could fall in very strange places --- on pages that aren't
1105 executable, or at addresses that are not proper instruction
1106 boundaries. (We do generally let other threads run while we wait
1107 to hit the software single-step breakpoint, and they might
1108 encounter such a corrupted instruction.) One way to work around
1109 this would be to have gdbarch_displaced_step_copy_insn fully
1110 simulate the effect of PC-relative instructions (and return NULL)
1111 on architectures that use software single-stepping.
1112
1113 In non-stop mode, we can have independent and simultaneous step
1114 requests, so more than one thread may need to simultaneously step
1115 over a breakpoint. The current implementation assumes there is
1116 only one scratch space per process. In this case, we have to
1117 serialize access to the scratch space. If thread A wants to step
1118 over a breakpoint, but we are currently waiting for some other
1119 thread to complete a displaced step, we leave thread A stopped and
1120 place it in the displaced_step_request_queue. Whenever a displaced
1121 step finishes, we pick the next thread in the queue and start a new
1122 displaced step operation on it. See displaced_step_prepare and
1123 displaced_step_fixup for details. */
1124
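/* Condensed sketch of steps n1-n4 above, expressed with the gdbarch
   methods just described.  Illustrative only: queuing, error handling
   and cleanups are omitted; the real code is in displaced_step_prepare
   and displaced_step_fixup below.  */
#if 0
static void
example_displaced_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  struct displaced_step_closure *closure;

  /* n1: copy the instruction to the scratch area, adjusting it as the
     architecture requires.  */
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
					      regcache);

  /* n2: single-step the copy.  */
  regcache_write_pc (regcache, copy);
  target_resume (inferior_ptid, 1, GDB_SIGNAL_0);
  /* ... wait for the single-step to complete ...  */

  /* n3: fix up registers/memory as if the instruction had executed at
     its original address; n4 then resumes the thread normally.  */
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy, regcache);
  gdbarch_displaced_step_free_closure (gdbarch, closure);
}
#endif
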
1125 struct displaced_step_request
1126 {
1127 ptid_t ptid;
1128 struct displaced_step_request *next;
1129 };
1130
1131 /* Per-inferior displaced stepping state. */
1132 struct displaced_step_inferior_state
1133 {
1134 /* Pointer to next in linked list. */
1135 struct displaced_step_inferior_state *next;
1136
1137 /* The process this displaced step state refers to. */
1138 int pid;
1139
1140 /* A queue of pending displaced stepping requests. One entry per
1141 thread that needs to do a displaced step. */
1142 struct displaced_step_request *step_request_queue;
1143
1144 /* If this is not null_ptid, this is the thread carrying out a
1145 displaced single-step in process PID. This thread's state will
1146 require fixing up once it has completed its step. */
1147 ptid_t step_ptid;
1148
1149 /* The architecture the thread had when we stepped it. */
1150 struct gdbarch *step_gdbarch;
1151
1152 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1153 for post-step cleanup. */
1154 struct displaced_step_closure *step_closure;
1155
1156 /* The address of the original instruction, and the copy we
1157 made. */
1158 CORE_ADDR step_original, step_copy;
1159
1160 /* Saved contents of copy area. */
1161 gdb_byte *step_saved_copy;
1162 };
1163
1164 /* The list of states of processes involved in displaced stepping
1165 presently. */
1166 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1167
1168 /* Get the displaced stepping state of process PID. */
1169
1170 static struct displaced_step_inferior_state *
1171 get_displaced_stepping_state (int pid)
1172 {
1173 struct displaced_step_inferior_state *state;
1174
1175 for (state = displaced_step_inferior_states;
1176 state != NULL;
1177 state = state->next)
1178 if (state->pid == pid)
1179 return state;
1180
1181 return NULL;
1182 }
1183
1184 /* Add a new displaced stepping state for process PID to the displaced
1185 stepping state list, or return a pointer to an already existing
1186 entry, if one already exists. Never returns NULL. */
1187
1188 static struct displaced_step_inferior_state *
1189 add_displaced_stepping_state (int pid)
1190 {
1191 struct displaced_step_inferior_state *state;
1192
1193 for (state = displaced_step_inferior_states;
1194 state != NULL;
1195 state = state->next)
1196 if (state->pid == pid)
1197 return state;
1198
1199 state = xcalloc (1, sizeof (*state));
1200 state->pid = pid;
1201 state->next = displaced_step_inferior_states;
1202 displaced_step_inferior_states = state;
1203
1204 return state;
1205 }
1206
1207 /* If the inferior is in displaced stepping, and ADDR equals the starting
1208 address of the copy area, return the corresponding displaced_step_closure.
1209 Otherwise, return NULL. */
1210
1211 struct displaced_step_closure*
1212 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1213 {
1214 struct displaced_step_inferior_state *displaced
1215 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1216
1217 /* Is ADDR the copy area of a displaced step that is in progress? */
1218 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1219 && (displaced->step_copy == addr))
1220 return displaced->step_closure;
1221
1222 return NULL;
1223 }
1224
1225 /* Remove the displaced stepping state of process PID. */
1226
1227 static void
1228 remove_displaced_stepping_state (int pid)
1229 {
1230 struct displaced_step_inferior_state *it, **prev_next_p;
1231
1232 gdb_assert (pid != 0);
1233
1234 it = displaced_step_inferior_states;
1235 prev_next_p = &displaced_step_inferior_states;
1236 while (it)
1237 {
1238 if (it->pid == pid)
1239 {
1240 *prev_next_p = it->next;
1241 xfree (it);
1242 return;
1243 }
1244
1245 prev_next_p = &it->next;
1246 it = *prev_next_p;
1247 }
1248 }
1249
1250 static void
1251 infrun_inferior_exit (struct inferior *inf)
1252 {
1253 remove_displaced_stepping_state (inf->pid);
1254 }
1255
1256 /* If ON, and the architecture supports it, GDB will use displaced
1257 stepping to step over breakpoints. If OFF, or if the architecture
1258 doesn't support it, GDB will instead use the traditional
1259 hold-and-step approach. If AUTO (which is the default), GDB will
1260 decide which technique to use to step over breakpoints depending on
1261 which of all-stop or non-stop mode is active --- displaced stepping
1262 in non-stop mode; hold-and-step in all-stop mode. */
1263
1264 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1265
1266 static void
1267 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1268 struct cmd_list_element *c,
1269 const char *value)
1270 {
1271 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1272 fprintf_filtered (file,
1273 _("Debugger's willingness to use displaced stepping "
1274 "to step over breakpoints is %s (currently %s).\n"),
1275 value, non_stop ? "on" : "off");
1276 else
1277 fprintf_filtered (file,
1278 _("Debugger's willingness to use displaced stepping "
1279 "to step over breakpoints is %s.\n"), value);
1280 }
1281
1282 /* Return non-zero if displaced stepping can/should be used to step
1283 over breakpoints. */
1284
1285 static int
1286 use_displaced_stepping (struct gdbarch *gdbarch)
1287 {
1288 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1289 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1290 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1291 && find_record_target () == NULL);
1292 }
1293
1294 /* Clean out any stray displaced stepping state. */
1295 static void
1296 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1297 {
1298 /* Indicate that there is no cleanup pending. */
1299 displaced->step_ptid = null_ptid;
1300
1301 if (displaced->step_closure)
1302 {
1303 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1304 displaced->step_closure);
1305 displaced->step_closure = NULL;
1306 }
1307 }
1308
1309 static void
1310 displaced_step_clear_cleanup (void *arg)
1311 {
1312 struct displaced_step_inferior_state *state = arg;
1313
1314 displaced_step_clear (state);
1315 }
1316
1317 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1318 void
1319 displaced_step_dump_bytes (struct ui_file *file,
1320 const gdb_byte *buf,
1321 size_t len)
1322 {
1323 int i;
1324
1325 for (i = 0; i < len; i++)
1326 fprintf_unfiltered (file, "%02x ", buf[i]);
1327 fputs_unfiltered ("\n", file);
1328 }
1329
1330 /* Prepare to single-step, using displaced stepping.
1331
1332 Note that we cannot use displaced stepping when we have a signal to
1333 deliver. If we have a signal to deliver and an instruction to step
1334 over, then after the step, there will be no indication from the
1335 target whether the thread entered a signal handler or ignored the
1336 signal and stepped over the instruction successfully --- both cases
1337 result in a simple SIGTRAP. In the first case we mustn't do a
1338 fixup, and in the second case we must --- but we can't tell which.
1339 Comments in the code for 'random signals' in handle_inferior_event
1340 explain how we handle this case instead.
1341
1342 Returns 1 if preparing was successful -- this thread is going to be
1343 stepped now; or 0 if displaced stepping this thread got queued. */
1344 static int
1345 displaced_step_prepare (ptid_t ptid)
1346 {
1347 struct cleanup *old_cleanups, *ignore_cleanups;
1348 struct thread_info *tp = find_thread_ptid (ptid);
1349 struct regcache *regcache = get_thread_regcache (ptid);
1350 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1351 CORE_ADDR original, copy;
1352 ULONGEST len;
1353 struct displaced_step_closure *closure;
1354 struct displaced_step_inferior_state *displaced;
1355 int status;
1356
1357 /* We should never reach this function if the architecture does not
1358 support displaced stepping. */
1359 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1360
1361 /* Disable range stepping while executing in the scratch pad. We
1362 want a single-step even if executing the displaced instruction in
1363 the scratch buffer lands within the stepping range (e.g., a
1364 jump/branch). */
1365 tp->control.may_range_step = 0;
1366
1367 /* We have to displaced step one thread at a time, as we only have
1368 access to a single scratch space per inferior. */
1369
1370 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1371
1372 if (!ptid_equal (displaced->step_ptid, null_ptid))
1373 {
1374 /* Already waiting for a displaced step to finish. Defer this
1375 request and place in queue. */
1376 struct displaced_step_request *req, *new_req;
1377
1378 if (debug_displaced)
1379 fprintf_unfiltered (gdb_stdlog,
1380 "displaced: defering step of %s\n",
1381 target_pid_to_str (ptid));
1382
1383 new_req = xmalloc (sizeof (*new_req));
1384 new_req->ptid = ptid;
1385 new_req->next = NULL;
1386
1387 if (displaced->step_request_queue)
1388 {
1389 for (req = displaced->step_request_queue;
1390 req && req->next;
1391 req = req->next)
1392 ;
1393 req->next = new_req;
1394 }
1395 else
1396 displaced->step_request_queue = new_req;
1397
1398 return 0;
1399 }
1400 else
1401 {
1402 if (debug_displaced)
1403 fprintf_unfiltered (gdb_stdlog,
1404 "displaced: stepping %s now\n",
1405 target_pid_to_str (ptid));
1406 }
1407
1408 displaced_step_clear (displaced);
1409
1410 old_cleanups = save_inferior_ptid ();
1411 inferior_ptid = ptid;
1412
1413 original = regcache_read_pc (regcache);
1414
1415 copy = gdbarch_displaced_step_location (gdbarch);
1416 len = gdbarch_max_insn_length (gdbarch);
1417
1418 /* Save the original contents of the copy area. */
1419 displaced->step_saved_copy = xmalloc (len);
1420 ignore_cleanups = make_cleanup (free_current_contents,
1421 &displaced->step_saved_copy);
1422 status = target_read_memory (copy, displaced->step_saved_copy, len);
1423 if (status != 0)
1424 throw_error (MEMORY_ERROR,
1425 _("Error accessing memory address %s (%s) for "
1426 "displaced-stepping scratch space."),
1427 paddress (gdbarch, copy), safe_strerror (status));
1428 if (debug_displaced)
1429 {
1430 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1431 paddress (gdbarch, copy));
1432 displaced_step_dump_bytes (gdb_stdlog,
1433 displaced->step_saved_copy,
1434 len);
1435 };
1436
1437 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1438 original, copy, regcache);
1439
1440 /* We don't support the fully-simulated case at present. */
1441 gdb_assert (closure);
1442
1443 /* Save the information we need to fix things up if the step
1444 succeeds. */
1445 displaced->step_ptid = ptid;
1446 displaced->step_gdbarch = gdbarch;
1447 displaced->step_closure = closure;
1448 displaced->step_original = original;
1449 displaced->step_copy = copy;
1450
1451 make_cleanup (displaced_step_clear_cleanup, displaced);
1452
1453 /* Resume execution at the copy. */
1454 regcache_write_pc (regcache, copy);
1455
1456 discard_cleanups (ignore_cleanups);
1457
1458 do_cleanups (old_cleanups);
1459
1460 if (debug_displaced)
1461 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1462 paddress (gdbarch, copy));
1463
1464 return 1;
1465 }
1466
1467 static void
1468 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1469 const gdb_byte *myaddr, int len)
1470 {
1471 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1472
1473 inferior_ptid = ptid;
1474 write_memory (memaddr, myaddr, len);
1475 do_cleanups (ptid_cleanup);
1476 }
1477
1478 /* Restore the contents of the copy area for thread PTID. */
1479
1480 static void
1481 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1482 ptid_t ptid)
1483 {
1484 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1485
1486 write_memory_ptid (ptid, displaced->step_copy,
1487 displaced->step_saved_copy, len);
1488 if (debug_displaced)
1489 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1490 target_pid_to_str (ptid),
1491 paddress (displaced->step_gdbarch,
1492 displaced->step_copy));
1493 }
1494
1495 static void
1496 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1497 {
1498 struct cleanup *old_cleanups;
1499 struct displaced_step_inferior_state *displaced
1500 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1501
1502 /* Was any thread of this process doing a displaced step? */
1503 if (displaced == NULL)
1504 return;
1505
1506 /* Was this event for the pid we displaced? */
1507 if (ptid_equal (displaced->step_ptid, null_ptid)
1508 || ! ptid_equal (displaced->step_ptid, event_ptid))
1509 return;
1510
1511 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1512
1513 displaced_step_restore (displaced, displaced->step_ptid);
1514
1515 /* Did the instruction complete successfully? */
1516 if (signal == GDB_SIGNAL_TRAP)
1517 {
1518 /* Fix up the resulting state. */
1519 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1520 displaced->step_closure,
1521 displaced->step_original,
1522 displaced->step_copy,
1523 get_thread_regcache (displaced->step_ptid));
1524 }
1525 else
1526 {
1527 /* Since the instruction didn't complete, all we can do is
1528 relocate the PC. */
1529 struct regcache *regcache = get_thread_regcache (event_ptid);
1530 CORE_ADDR pc = regcache_read_pc (regcache);
1531
1532 pc = displaced->step_original + (pc - displaced->step_copy);
1533 regcache_write_pc (regcache, pc);
1534 }
1535
1536 do_cleanups (old_cleanups);
1537
1538 displaced->step_ptid = null_ptid;
1539
1540 /* Are there any pending displaced stepping requests? If so, run
1541 one now. Leave the state object around, since we're likely to
1542 need it again soon. */
1543 while (displaced->step_request_queue)
1544 {
1545 struct displaced_step_request *head;
1546 ptid_t ptid;
1547 struct regcache *regcache;
1548 struct gdbarch *gdbarch;
1549 CORE_ADDR actual_pc;
1550 struct address_space *aspace;
1551
1552 head = displaced->step_request_queue;
1553 ptid = head->ptid;
1554 displaced->step_request_queue = head->next;
1555 xfree (head);
1556
1557 context_switch (ptid);
1558
1559 regcache = get_thread_regcache (ptid);
1560 actual_pc = regcache_read_pc (regcache);
1561 aspace = get_regcache_aspace (regcache);
1562
1563 if (breakpoint_here_p (aspace, actual_pc))
1564 {
1565 if (debug_displaced)
1566 fprintf_unfiltered (gdb_stdlog,
1567 "displaced: stepping queued %s now\n",
1568 target_pid_to_str (ptid));
1569
1570 displaced_step_prepare (ptid);
1571
1572 gdbarch = get_regcache_arch (regcache);
1573
1574 if (debug_displaced)
1575 {
1576 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1577 gdb_byte buf[4];
1578
1579 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1580 paddress (gdbarch, actual_pc));
1581 read_memory (actual_pc, buf, sizeof (buf));
1582 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1583 }
1584
1585 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1586 displaced->step_closure))
1587 target_resume (ptid, 1, GDB_SIGNAL_0);
1588 else
1589 target_resume (ptid, 0, GDB_SIGNAL_0);
1590
1591 /* Done, we're stepping a thread. */
1592 break;
1593 }
1594 else
1595 {
1596 int step;
1597 struct thread_info *tp = inferior_thread ();
1598
1599 /* The breakpoint we were sitting under has since been
1600 removed. */
1601 tp->control.trap_expected = 0;
1602
1603 /* Go back to what we were trying to do. */
1604 step = currently_stepping (tp);
1605
1606 if (debug_displaced)
1607 fprintf_unfiltered (gdb_stdlog,
1608 "displaced: breakpoint is gone: %s, step(%d)\n",
1609 target_pid_to_str (tp->ptid), step);
1610
1611 target_resume (ptid, step, GDB_SIGNAL_0);
1612 tp->suspend.stop_signal = GDB_SIGNAL_0;
1613
1614 /* This request was discarded. See if there's any other
1615 thread waiting for its turn. */
1616 }
1617 }
1618 }
1619
1620 /* Update global variables holding ptids to hold NEW_PTID if they were
1621 holding OLD_PTID. */
1622 static void
1623 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1624 {
1625 struct displaced_step_request *it;
1626 struct displaced_step_inferior_state *displaced;
1627
1628 if (ptid_equal (inferior_ptid, old_ptid))
1629 inferior_ptid = new_ptid;
1630
1631 if (ptid_equal (singlestep_ptid, old_ptid))
1632 singlestep_ptid = new_ptid;
1633
1634 for (displaced = displaced_step_inferior_states;
1635 displaced;
1636 displaced = displaced->next)
1637 {
1638 if (ptid_equal (displaced->step_ptid, old_ptid))
1639 displaced->step_ptid = new_ptid;
1640
1641 for (it = displaced->step_request_queue; it; it = it->next)
1642 if (ptid_equal (it->ptid, old_ptid))
1643 it->ptid = new_ptid;
1644 }
1645 }
1646
1647 \f
1648 /* Resuming. */
1649
1650 /* Things to clean up if we QUIT out of resume (). */
1651 static void
1652 resume_cleanups (void *ignore)
1653 {
1654 normal_stop ();
1655 }
1656
1657 static const char schedlock_off[] = "off";
1658 static const char schedlock_on[] = "on";
1659 static const char schedlock_step[] = "step";
1660 static const char *const scheduler_enums[] = {
1661 schedlock_off,
1662 schedlock_on,
1663 schedlock_step,
1664 NULL
1665 };
1666 static const char *scheduler_mode = schedlock_off;
1667 static void
1668 show_scheduler_mode (struct ui_file *file, int from_tty,
1669 struct cmd_list_element *c, const char *value)
1670 {
1671 fprintf_filtered (file,
1672 _("Mode for locking scheduler "
1673 "during execution is \"%s\".\n"),
1674 value);
1675 }
1676
1677 static void
1678 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1679 {
1680 if (!target_can_lock_scheduler)
1681 {
1682 scheduler_mode = schedlock_off;
1683 error (_("Target '%s' cannot support this command."), target_shortname);
1684 }
1685 }
1686
1687 /* True if execution commands resume all threads of all processes by
1688 default; otherwise, resume only threads of the current inferior
1689 process. */
1690 int sched_multi = 0;
1691
1692 /* Try to set up for software single stepping over the specified location.
1693 Return 1 if target_resume() should use hardware single step.
1694
1695 GDBARCH the current gdbarch.
1696 PC the location to step over. */
1697
1698 static int
1699 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1700 {
1701 int hw_step = 1;
1702
1703 if (execution_direction == EXEC_FORWARD
1704 && gdbarch_software_single_step_p (gdbarch)
1705 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1706 {
1707 hw_step = 0;
1708 /* Do not pull these breakpoints until after a `wait' in
1709 `wait_for_inferior'. */
1710 singlestep_breakpoints_inserted_p = 1;
1711 singlestep_ptid = inferior_ptid;
1712 singlestep_pc = pc;
1713 }
1714 return hw_step;
1715 }
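/* Illustrative only: the return value above is typically fed straight
   into target_resume, choosing hardware or software single-step, e.g.

     hw_step = maybe_software_singlestep (gdbarch, pc);
     target_resume (resume_ptid, hw_step, sig);

   (the real logic is in resume () below).  */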
1716
1717 /* Return a ptid representing the set of threads that we will proceed,
1718    from the perspective of the user/frontend.  We may actually resume
1719 fewer threads at first, e.g., if a thread is stopped at a
1720 breakpoint that needs stepping-off, but that should not be visible
1721 to the user/frontend, and neither should the frontend/user be
1722 allowed to proceed any of the threads that happen to be stopped for
1723 internal run control handling, if a previous command wanted them
1724 resumed. */
1725
1726 ptid_t
1727 user_visible_resume_ptid (int step)
1728 {
1729 /* By default, resume all threads of all processes. */
1730 ptid_t resume_ptid = RESUME_ALL;
1731
1732 /* Maybe resume only all threads of the current process. */
1733 if (!sched_multi && target_supports_multi_process ())
1734 {
1735 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1736 }
1737
1738 /* Maybe resume a single thread after all. */
1739 if (non_stop)
1740 {
1741 /* With non-stop mode on, threads are always handled
1742 individually. */
1743 resume_ptid = inferior_ptid;
1744 }
1745 else if ((scheduler_mode == schedlock_on)
1746 || (scheduler_mode == schedlock_step
1747 && (step || singlestep_breakpoints_inserted_p)))
1748 {
1749 /* User-settable 'scheduler' mode requires solo thread resume. */
1750 resume_ptid = inferior_ptid;
1751 }
1752
1753 return resume_ptid;
1754 }
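
/* A minimal usage sketch (illustrative only, not part of GDB): an
   execution command computes the user-visible resume set first, marks
   it running, and may still hand a narrower set to the target:

     ptid_t visible_ptid = user_visible_resume_ptid (step);

     if (!tp->control.in_infcall)
       set_running (visible_ptid, 1);
     ...
     target_resume (resume_ptid, step, sig);

   resume () below follows exactly this pattern.  */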
1755
1756 /* Resume the inferior, but allow a QUIT. This is useful if the user
1757 wants to interrupt some lengthy single-stepping operation
1758 (for child processes, the SIGINT goes to the inferior, and so
1759 we get a SIGINT random_signal, but for remote debugging and perhaps
1760 other targets, that's not true).
1761
1762 STEP nonzero if we should step (zero to continue instead).
1763 SIG is the signal to give the inferior (zero for none). */
1764 void
1765 resume (int step, enum gdb_signal sig)
1766 {
1767 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1768 struct regcache *regcache = get_current_regcache ();
1769 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1770 struct thread_info *tp = inferior_thread ();
1771 CORE_ADDR pc = regcache_read_pc (regcache);
1772 struct address_space *aspace = get_regcache_aspace (regcache);
1773 ptid_t resume_ptid;
1774 int hw_step = step;
1775
1776 QUIT;
1777
1778 if (current_inferior ()->waiting_for_vfork_done)
1779 {
1780 /* Don't try to single-step a vfork parent that is waiting for
1781 the child to get out of the shared memory region (by exec'ing
1782 or exiting). This is particularly important on software
1783 single-step archs, as the child process would trip on the
1784 software single step breakpoint inserted for the parent
1785 process. Since the parent will not actually execute any
1786 instruction until the child is out of the shared region (such
1787 are vfork's semantics), it is safe to simply continue it.
1788 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1789 the parent, and tell it to `keep_going', which automatically
1790 re-sets it stepping. */
1791 if (debug_infrun)
1792 fprintf_unfiltered (gdb_stdlog,
1793 "infrun: resume : clear step\n");
1794 hw_step = 0;
1795 }
1796
1797 if (debug_infrun)
1798 fprintf_unfiltered (gdb_stdlog,
1799 "infrun: resume (step=%d, signal=%s), "
1800 "trap_expected=%d, current thread [%s] at %s\n",
1801 step, gdb_signal_to_symbol_string (sig),
1802 tp->control.trap_expected,
1803 target_pid_to_str (inferior_ptid),
1804 paddress (gdbarch, pc));
1805
1806 /* Normally, by the time we reach `resume', the breakpoints are either
1807 removed or inserted, as appropriate. The exception is if we're sitting
1808 at a permanent breakpoint; we need to step over it, but permanent
1809 breakpoints can't be removed. So we have to test for it here. */
1810 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1811 {
1812 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1813 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1814 else
1815 error (_("\
1816 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1817 how to step past a permanent breakpoint on this architecture. Try using\n\
1818 a command like `return' or `jump' to continue execution."));
1819 }
1820
1821 /* If we have a breakpoint to step over, make sure to do a single
1822 step only. Same if we have software watchpoints. */
1823 if (tp->control.trap_expected || bpstat_should_step ())
1824 tp->control.may_range_step = 0;
1825
1826 /* If enabled, step over breakpoints by executing a copy of the
1827 instruction at a different address.
1828
1829 We can't use displaced stepping when we have a signal to deliver;
1830 the comments for displaced_step_prepare explain why. The
1831 comments in the handle_inferior event for dealing with 'random
1832 signals' explain what we do instead.
1833
1834 We can't use displaced stepping when we are waiting for vfork_done
1835 event, displaced stepping breaks the vfork child similarly as single
1836 step software breakpoint. */
1837 if (use_displaced_stepping (gdbarch)
1838 && (tp->control.trap_expected
1839 || (hw_step && gdbarch_software_single_step_p (gdbarch)))
1840 && sig == GDB_SIGNAL_0
1841 && !current_inferior ()->waiting_for_vfork_done)
1842 {
1843 struct displaced_step_inferior_state *displaced;
1844
1845 if (!displaced_step_prepare (inferior_ptid))
1846 {
1847 /* Got placed in displaced stepping queue. Will be resumed
1848 later when all the currently queued displaced stepping
1849 requests finish. The thread is not executing at this
1850 point, and the call to set_executing will be made later.
1851 But we need to call set_running here, since from the
1852 user/frontend's point of view, threads were set running.
1853 Unless we're calling an inferior function, as in that
1854 case we pretend the inferior doesn't run at all. */
1855 if (!tp->control.in_infcall)
1856 set_running (user_visible_resume_ptid (step), 1);
1857 discard_cleanups (old_cleanups);
1858 return;
1859 }
1860
1861 /* Update pc to reflect the new address from which we will execute
1862 instructions due to displaced stepping. */
1863 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
1864
1865 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1866 hw_step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1867 displaced->step_closure);
1868 }
1869
1870 /* Do we need to do it the hard way, w/temp breakpoints? */
1871 else if (step)
1872 step = maybe_software_singlestep (gdbarch, pc);
1873
1874 /* Currently, our software single-step implementation leads to different
1875 results than hardware single-stepping in one situation: when stepping
1876 into delivering a signal which has an associated signal handler,
1877 hardware single-step will stop at the first instruction of the handler,
1878 while software single-step will simply skip execution of the handler.
1879
1880 For now, this difference in behavior is accepted since there is no
1881 easy way to actually implement single-stepping into a signal handler
1882 without kernel support.
1883
1884 However, there is one scenario where this difference leads to follow-on
1885 problems: if we're stepping off a breakpoint by removing all breakpoints
1886 and then single-stepping. In this case, the software single-step
1887 behavior means that even if there is a *breakpoint* in the signal
1888 handler, GDB still would not stop.
1889
1890 Fortunately, we can at least fix this particular issue. We detect
1891 here the case where we are about to deliver a signal while software
1892 single-stepping with breakpoints removed. In this situation, we
1893 revert the decisions to remove all breakpoints and insert single-
1894 step breakpoints, and instead we install a step-resume breakpoint
1895 at the current address, deliver the signal without stepping, and
1896 once we arrive back at the step-resume breakpoint, actually step
1897 over the breakpoint we originally wanted to step over. */
1898 if (singlestep_breakpoints_inserted_p
1899 && tp->control.trap_expected && sig != GDB_SIGNAL_0)
1900 {
1901 /* If we have nested signals or a pending signal is delivered
1902       immediately after a handler returns, we might already have
1903 a step-resume breakpoint set on the earlier handler. We cannot
1904 set another step-resume breakpoint; just continue on until the
1905 original breakpoint is hit. */
1906 if (tp->control.step_resume_breakpoint == NULL)
1907 {
1908 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1909 tp->step_after_step_resume_breakpoint = 1;
1910 }
1911
1912 remove_single_step_breakpoints ();
1913 singlestep_breakpoints_inserted_p = 0;
1914
1915 clear_step_over_info ();
1916 tp->control.trap_expected = 0;
1917
1918 insert_breakpoints ();
1919 }
1920
1921 /* If STEP is set, it's a request to use hardware stepping
1922 facilities. But in that case, we should never
1923 use singlestep breakpoint. */
1924 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1925
1926 /* Decide the set of threads to ask the target to resume. Start
1927      by assuming everything will be resumed, then narrow the set
1928      by applying increasingly restrictive conditions.  */
1929 resume_ptid = user_visible_resume_ptid (step);
1930
1931 /* Even if RESUME_PTID is a wildcard, and we end up resuming less
1932 (e.g., we might need to step over a breakpoint), from the
1933 user/frontend's point of view, all threads in RESUME_PTID are now
1934 running. Unless we're calling an inferior function, as in that
1935      case we pretend the inferior doesn't run at all.  */
1936 if (!tp->control.in_infcall)
1937 set_running (resume_ptid, 1);
1938
1939 /* Maybe resume a single thread after all. */
1940 if ((step || singlestep_breakpoints_inserted_p)
1941 && tp->control.trap_expected)
1942 {
1943 /* We're allowing a thread to run past a breakpoint it has
1944 hit, by single-stepping the thread with the breakpoint
1945 removed. In which case, we need to single-step only this
1946 thread, and keep others stopped, as they can miss this
1947 breakpoint if allowed to run. */
1948 resume_ptid = inferior_ptid;
1949 }
1950
1951 if (gdbarch_cannot_step_breakpoint (gdbarch))
1952 {
1953 /* Most targets can step a breakpoint instruction, thus
1954 executing it normally. But if this one cannot, just
1955 continue and we will hit it anyway. */
1956 if (step && breakpoint_inserted_here_p (aspace, pc))
1957 step = 0;
1958 }
1959
1960 if (debug_displaced
1961 && use_displaced_stepping (gdbarch)
1962 && tp->control.trap_expected)
1963 {
1964 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1965 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1966 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1967 gdb_byte buf[4];
1968
1969 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1970 paddress (resume_gdbarch, actual_pc));
1971 read_memory (actual_pc, buf, sizeof (buf));
1972 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1973 }
1974
1975 if (tp->control.may_range_step)
1976 {
1977 /* If we're resuming a thread with the PC out of the step
1978 range, then we're doing some nested/finer run control
1979 operation, like stepping the thread out of the dynamic
1980 linker or the displaced stepping scratch pad. We
1981 shouldn't have allowed a range step then. */
1982 gdb_assert (pc_in_thread_step_range (pc, tp));
1983 }
1984
1985 /* Install inferior's terminal modes. */
1986 target_terminal_inferior ();
1987
1988 /* Avoid confusing the next resume, if the next stop/resume
1989 happens to apply to another thread. */
1990 tp->suspend.stop_signal = GDB_SIGNAL_0;
1991
1992 /* Advise target which signals may be handled silently. If we have
1993 removed breakpoints because we are stepping over one (which can
1994 happen only if we are not using displaced stepping), we need to
1995 receive all signals to avoid accidentally skipping a breakpoint
1996 during execution of a signal handler. */
1997 if ((step || singlestep_breakpoints_inserted_p)
1998 && tp->control.trap_expected
1999 && !use_displaced_stepping (gdbarch))
2000 target_pass_signals (0, NULL);
2001 else
2002 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
2003
2004 target_resume (resume_ptid, step, sig);
2005
2006 discard_cleanups (old_cleanups);
2007 }
2008 \f
2009 /* Proceeding. */
2010
2011 /* Clear out all variables saying what to do when inferior is continued.
2012 First do this, then set the ones you want, then call `proceed'. */
2013
2014 static void
2015 clear_proceed_status_thread (struct thread_info *tp)
2016 {
2017 if (debug_infrun)
2018 fprintf_unfiltered (gdb_stdlog,
2019 "infrun: clear_proceed_status_thread (%s)\n",
2020 target_pid_to_str (tp->ptid));
2021
2022 tp->control.trap_expected = 0;
2023 tp->control.step_range_start = 0;
2024 tp->control.step_range_end = 0;
2025 tp->control.may_range_step = 0;
2026 tp->control.step_frame_id = null_frame_id;
2027 tp->control.step_stack_frame_id = null_frame_id;
2028 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
2029 tp->stop_requested = 0;
2030
2031 tp->control.stop_step = 0;
2032
2033 tp->control.proceed_to_finish = 0;
2034
2035 tp->control.command_interp = NULL;
2036
2037 /* Discard any remaining commands or status from previous stop. */
2038 bpstat_clear (&tp->control.stop_bpstat);
2039 }
2040
2041 static int
2042 clear_proceed_status_callback (struct thread_info *tp, void *data)
2043 {
2044 if (is_exited (tp->ptid))
2045 return 0;
2046
2047 clear_proceed_status_thread (tp);
2048 return 0;
2049 }
2050
2051 void
2052 clear_proceed_status (void)
2053 {
2054 if (!non_stop)
2055 {
2056 /* In all-stop mode, delete the per-thread status of all
2057          threads; even if inferior_ptid is null_ptid, there may be
2058 threads on the list. E.g., we may be launching a new
2059 process, while selecting the executable. */
2060 iterate_over_threads (clear_proceed_status_callback, NULL);
2061 }
2062
2063 if (!ptid_equal (inferior_ptid, null_ptid))
2064 {
2065 struct inferior *inferior;
2066
2067 if (non_stop)
2068 {
2069 /* If in non-stop mode, only delete the per-thread status of
2070 the current thread. */
2071 clear_proceed_status_thread (inferior_thread ());
2072 }
2073
2074 inferior = current_inferior ();
2075 inferior->control.stop_soon = NO_STOP_QUIETLY;
2076 }
2077
2078 stop_after_trap = 0;
2079
2080 clear_step_over_info ();
2081
2082 observer_notify_about_to_proceed ();
2083
2084 if (stop_registers)
2085 {
2086 regcache_xfree (stop_registers);
2087 stop_registers = NULL;
2088 }
2089 }
2090
2091 /* Returns true if TP is still stopped at a breakpoint that needs
2092 stepping-over in order to make progress. If the breakpoint is gone
2093 meanwhile, we can skip the whole step-over dance. */
2094
2095 static int
2096 thread_still_needs_step_over (struct thread_info *tp)
2097 {
2098 if (tp->stepping_over_breakpoint)
2099 {
2100 struct regcache *regcache = get_thread_regcache (tp->ptid);
2101
2102 if (breakpoint_here_p (get_regcache_aspace (regcache),
2103 regcache_read_pc (regcache)))
2104 return 1;
2105
2106 tp->stepping_over_breakpoint = 0;
2107 }
2108
2109 return 0;
2110 }
2111
2112 /* Returns true if scheduler locking applies. STEP indicates whether
2113 we're about to do a step/next-like command to a thread. */
2114
2115 static int
2116 schedlock_applies (int step)
2117 {
2118 return (scheduler_mode == schedlock_on
2119 || (scheduler_mode == schedlock_step
2120 && step));
2121 }
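
/* For example (illustrative only): with "set scheduler-locking step",
   schedlock_applies (1) is true for step/next-like commands while
   schedlock_applies (0) is false for "continue", so only stepping
   commands are restricted to the current thread; with
   "set scheduler-locking on" it is true regardless of STEP.  */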
2122
2123 /* Look for a thread other than EXCEPT that has previously reported a
2124 breakpoint event, and thus needs a step-over in order to make
2125    progress.  Returns NULL if none is found.  STEP indicates whether
2126 we're about to step the current thread, in order to decide whether
2127 "set scheduler-locking step" applies. */
2128
2129 static struct thread_info *
2130 find_thread_needs_step_over (int step, struct thread_info *except)
2131 {
2132 struct thread_info *tp, *current;
2133
2134 /* With non-stop mode on, threads are always handled individually. */
2135 gdb_assert (! non_stop);
2136
2137 current = inferior_thread ();
2138
2139 /* If scheduler locking applies, we can avoid iterating over all
2140 threads. */
2141 if (schedlock_applies (step))
2142 {
2143 if (except != current
2144 && thread_still_needs_step_over (current))
2145 return current;
2146
2147 return NULL;
2148 }
2149
2150 ALL_THREADS (tp)
2151 {
2152 /* Ignore the EXCEPT thread. */
2153 if (tp == except)
2154 continue;
2155 /* Ignore threads of processes we're not resuming. */
2156 if (!sched_multi
2157 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
2158 continue;
2159
2160 if (thread_still_needs_step_over (tp))
2161 return tp;
2162 }
2163
2164 return NULL;
2165 }
2166
2167 /* Basic routine for continuing the program in various fashions.
2168
2169 ADDR is the address to resume at, or -1 for resume where stopped.
2170 SIGGNAL is the signal to give it, or 0 for none,
2171 or -1 for act according to how it stopped.
2172    STEP is nonzero if we should trap after one instruction.
2173 -1 means return after that and print nothing.
2174 You should probably set various step_... variables
2175 before calling here, if you are stepping.
2176
2177 You should call clear_proceed_status before calling proceed. */
2178
2179 void
2180 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2181 {
2182 struct regcache *regcache;
2183 struct gdbarch *gdbarch;
2184 struct thread_info *tp;
2185 CORE_ADDR pc;
2186 struct address_space *aspace;
2187
2188 /* If we're stopped at a fork/vfork, follow the branch set by the
2189 "set follow-fork-mode" command; otherwise, we'll just proceed
2190 resuming the current thread. */
2191 if (!follow_fork ())
2192 {
2193 /* The target for some reason decided not to resume. */
2194 normal_stop ();
2195 if (target_can_async_p ())
2196 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2197 return;
2198 }
2199
2200 /* We'll update this if & when we switch to a new thread. */
2201 previous_inferior_ptid = inferior_ptid;
2202
2203 regcache = get_current_regcache ();
2204 gdbarch = get_regcache_arch (regcache);
2205 aspace = get_regcache_aspace (regcache);
2206 pc = regcache_read_pc (regcache);
2207 tp = inferior_thread ();
2208
2209 if (step > 0)
2210 step_start_function = find_pc_function (pc);
2211 if (step < 0)
2212 stop_after_trap = 1;
2213
2214 /* Fill in with reasonable starting values. */
2215 init_thread_stepping_state (tp);
2216
2217 if (addr == (CORE_ADDR) -1)
2218 {
2219 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2220 && execution_direction != EXEC_REVERSE)
2221 /* There is a breakpoint at the address we will resume at,
2222 step one instruction before inserting breakpoints so that
2223 we do not stop right away (and report a second hit at this
2224 breakpoint).
2225
2226 Note, we don't do this in reverse, because we won't
2227 actually be executing the breakpoint insn anyway.
2228 We'll be (un-)executing the previous instruction. */
2229 tp->stepping_over_breakpoint = 1;
2230 else if (gdbarch_single_step_through_delay_p (gdbarch)
2231 && gdbarch_single_step_through_delay (gdbarch,
2232 get_current_frame ()))
2233 /* We stepped onto an instruction that needs to be stepped
2234 again before re-inserting the breakpoint, do so. */
2235 tp->stepping_over_breakpoint = 1;
2236 }
2237 else
2238 {
2239 regcache_write_pc (regcache, addr);
2240 }
2241
2242 /* Record the interpreter that issued the execution command that
2243 caused this thread to resume. If the top level interpreter is
2244 MI/async, and the execution command was a CLI command
2245 (next/step/etc.), we'll want to print stop event output to the MI
2246 console channel (the stepped-to line, etc.), as if the user
2247 entered the execution command on a real GDB console. */
2248 inferior_thread ()->control.command_interp = command_interp ();
2249
2250 if (debug_infrun)
2251 fprintf_unfiltered (gdb_stdlog,
2252 "infrun: proceed (addr=%s, signal=%s, step=%d)\n",
2253 paddress (gdbarch, addr),
2254 gdb_signal_to_symbol_string (siggnal), step);
2255
2256 if (non_stop)
2257 /* In non-stop, each thread is handled individually. The context
2258 must already be set to the right thread here. */
2259 ;
2260 else
2261 {
2262 struct thread_info *step_over;
2263
2264 /* In a multi-threaded task we may select another thread and
2265 then continue or step.
2266
2267 But if the old thread was stopped at a breakpoint, it will
2268 immediately cause another breakpoint stop without any
2269 execution (i.e. it will report a breakpoint hit incorrectly).
2270 So we must step over it first.
2271
2272 Look for a thread other than the current (TP) that reported a
2273 breakpoint hit and hasn't been resumed yet since. */
2274 step_over = find_thread_needs_step_over (step, tp);
2275 if (step_over != NULL)
2276 {
2277 if (debug_infrun)
2278 fprintf_unfiltered (gdb_stdlog,
2279 "infrun: need to step-over [%s] first\n",
2280 target_pid_to_str (step_over->ptid));
2281
2282 /* Store the prev_pc for the stepping thread too, needed by
2283             switch_back_to_stepped_thread.  */
2284 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2285 switch_to_thread (step_over->ptid);
2286 tp = step_over;
2287 }
2288 }
2289
2290 /* If we need to step over a breakpoint, and we're not using
2291 displaced stepping to do so, insert all breakpoints (watchpoints,
2292 etc.) but the one we're stepping over, step one instruction, and
2293 then re-insert the breakpoint when that step is finished. */
2294 if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
2295 {
2296 struct regcache *regcache = get_current_regcache ();
2297
2298 set_step_over_info (get_regcache_aspace (regcache),
2299 regcache_read_pc (regcache));
2300 }
2301 else
2302 clear_step_over_info ();
2303
2304 insert_breakpoints ();
2305
2306 tp->control.trap_expected = tp->stepping_over_breakpoint;
2307
2308 if (!non_stop)
2309 {
2310 /* Pass the last stop signal to the thread we're resuming,
2311 irrespective of whether the current thread is the thread that
2312 got the last event or not. This was historically GDB's
2313 behaviour before keeping a stop_signal per thread. */
2314
2315 struct thread_info *last_thread;
2316 ptid_t last_ptid;
2317 struct target_waitstatus last_status;
2318
2319 get_last_target_status (&last_ptid, &last_status);
2320 if (!ptid_equal (inferior_ptid, last_ptid)
2321 && !ptid_equal (last_ptid, null_ptid)
2322 && !ptid_equal (last_ptid, minus_one_ptid))
2323 {
2324 last_thread = find_thread_ptid (last_ptid);
2325 if (last_thread)
2326 {
2327 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2328 last_thread->suspend.stop_signal = GDB_SIGNAL_0;
2329 }
2330 }
2331 }
2332
2333 if (siggnal != GDB_SIGNAL_DEFAULT)
2334 tp->suspend.stop_signal = siggnal;
2335 /* If this signal should not be seen by program,
2336 give it zero. Used for debugging signals. */
2337 else if (!signal_program[tp->suspend.stop_signal])
2338 tp->suspend.stop_signal = GDB_SIGNAL_0;
2339
2340 annotate_starting ();
2341
2342 /* Make sure that output from GDB appears before output from the
2343 inferior. */
2344 gdb_flush (gdb_stdout);
2345
2346 /* Refresh prev_pc value just prior to resuming. This used to be
2347      done in stop_stepping; however, setting prev_pc there did not handle
2348 scenarios such as inferior function calls or returning from
2349 a function via the return command. In those cases, the prev_pc
2350 value was not set properly for subsequent commands. The prev_pc value
2351 is used to initialize the starting line number in the ecs. With an
2352 invalid value, the gdb next command ends up stopping at the position
2353 represented by the next line table entry past our start position.
2354 On platforms that generate one line table entry per line, this
2355 is not a problem. However, on the ia64, the compiler generates
2356 extraneous line table entries that do not increase the line number.
2357 When we issue the gdb next command on the ia64 after an inferior call
2358 or a return command, we often end up a few instructions forward, still
2359      within the original line we started in.
2360
2361 An attempt was made to refresh the prev_pc at the same time the
2362 execution_control_state is initialized (for instance, just before
2363 waiting for an inferior event). But this approach did not work
2364 because of platforms that use ptrace, where the pc register cannot
2365 be read unless the inferior is stopped. At that point, we are not
2366 guaranteed the inferior is stopped and so the regcache_read_pc() call
2367 can fail. Setting the prev_pc value here ensures the value is updated
2368 correctly when the inferior is stopped. */
2369 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2370
2371 /* Reset to normal state. */
2372 init_infwait_state ();
2373
2374 /* Resume inferior. */
2375 resume (tp->control.trap_expected || step || bpstat_should_step (),
2376 tp->suspend.stop_signal);
2377
2378 /* Wait for it to stop (if not standalone)
2379 and in any case decode why it stopped, and act accordingly. */
2380 /* Do this only if we are not using the event loop, or if the target
2381 does not support asynchronous execution. */
2382 if (!target_can_async_p ())
2383 {
2384 wait_for_inferior ();
2385 normal_stop ();
2386 }
2387 }
2388 \f
2389
2390 /* Start remote-debugging of a machine over a serial link. */
2391
2392 void
2393 start_remote (int from_tty)
2394 {
2395 struct inferior *inferior;
2396
2397 inferior = current_inferior ();
2398 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2399
2400 /* Always go on waiting for the target, regardless of the mode. */
2401 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2402 indicate to wait_for_inferior that a target should timeout if
2403 nothing is returned (instead of just blocking). Because of this,
2404 targets expecting an immediate response need to, internally, set
2405 things up so that the target_wait() is forced to eventually
2406 timeout. */
2407 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2408 differentiate to its caller what the state of the target is after
2409 the initial open has been performed. Here we're assuming that
2410 the target has stopped. It should be possible to eventually have
2411 target_open() return to the caller an indication that the target
2412 is currently running and GDB state should be set to the same as
2413 for an async run. */
2414 wait_for_inferior ();
2415
2416 /* Now that the inferior has stopped, do any bookkeeping like
2417 loading shared libraries. We want to do this before normal_stop,
2418 so that the displayed frame is up to date. */
2419 post_create_inferior (&current_target, from_tty);
2420
2421 normal_stop ();
2422 }
2423
2424 /* Initialize static vars when a new inferior begins. */
2425
2426 void
2427 init_wait_for_inferior (void)
2428 {
2429 /* These are meaningless until the first time through wait_for_inferior. */
2430
2431 breakpoint_init_inferior (inf_starting);
2432
2433 clear_proceed_status ();
2434
2435 target_last_wait_ptid = minus_one_ptid;
2436
2437 previous_inferior_ptid = inferior_ptid;
2438 init_infwait_state ();
2439
2440 /* Discard any skipped inlined frames. */
2441 clear_inline_frame_state (minus_one_ptid);
2442
2443 singlestep_ptid = null_ptid;
2444 singlestep_pc = 0;
2445 }
2446
2447 \f
2448 /* This enum encodes possible reasons for doing a target_wait, so that
2449 wfi can call target_wait in one place. (Ultimately the call will be
2450 moved out of the infinite loop entirely.) */
2451
2452 enum infwait_states
2453 {
2454 infwait_normal_state,
2455 infwait_step_watch_state,
2456 infwait_nonstep_watch_state
2457 };
2458
2459 /* The PTID we'll do a target_wait on.  */
2460 ptid_t waiton_ptid;
2461
2462 /* Current inferior wait state. */
2463 static enum infwait_states infwait_state;
2464
2465 /* Data to be passed around while handling an event. This data is
2466 discarded between events. */
2467 struct execution_control_state
2468 {
2469 ptid_t ptid;
2470 /* The thread that got the event, if this was a thread event; NULL
2471 otherwise. */
2472 struct thread_info *event_thread;
2473
2474 struct target_waitstatus ws;
2475 int stop_func_filled_in;
2476 CORE_ADDR stop_func_start;
2477 CORE_ADDR stop_func_end;
2478 const char *stop_func_name;
2479 int wait_some_more;
2480
2481 /* We were in infwait_step_watch_state or
2482      infwait_nonstep_watch_state, and the thread reported an
2483 event. */
2484 int stepped_after_stopped_by_watchpoint;
2485
2486 /* True if the event thread hit the single-step breakpoint of
2487 another thread. Thus the event doesn't cause a stop, the thread
2488 needs to be single-stepped past the single-step breakpoint before
2489 we can switch back to the original stepping thread. */
2490 int hit_singlestep_breakpoint;
2491 };
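
/* A sketch of the typical use of this structure (illustrative only;
   see wait_for_inferior and fetch_inferior_event below for the real
   callers):

     struct execution_control_state ecss;
     struct execution_control_state *ecs = &ecss;

     memset (ecs, 0, sizeof (*ecs));
     ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
     handle_inferior_event (ecs);
     if (!ecs->wait_some_more)
       normal_stop ();  */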
2492
2493 static void handle_inferior_event (struct execution_control_state *ecs);
2494
2495 static void handle_step_into_function (struct gdbarch *gdbarch,
2496 struct execution_control_state *ecs);
2497 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2498 struct execution_control_state *ecs);
2499 static void handle_signal_stop (struct execution_control_state *ecs);
2500 static void check_exception_resume (struct execution_control_state *,
2501 struct frame_info *);
2502
2503 static void stop_stepping (struct execution_control_state *ecs);
2504 static void prepare_to_wait (struct execution_control_state *ecs);
2505 static void keep_going (struct execution_control_state *ecs);
2506 static void process_event_stop_test (struct execution_control_state *ecs);
2507 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2508
2509 /* Callback for iterate over threads. If the thread is stopped, but
2510 the user/frontend doesn't know about that yet, go through
2511 normal_stop, as if the thread had just stopped now. ARG points at
2512 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2513 ptid_is_pid(PTID) is true, applies to all threads of the process
2514    pointed at by PTID.  Otherwise, applies only to the thread pointed at by
2515 PTID. */
2516
2517 static int
2518 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2519 {
2520 ptid_t ptid = * (ptid_t *) arg;
2521
2522 if ((ptid_equal (info->ptid, ptid)
2523 || ptid_equal (minus_one_ptid, ptid)
2524 || (ptid_is_pid (ptid)
2525 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2526 && is_running (info->ptid)
2527 && !is_executing (info->ptid))
2528 {
2529 struct cleanup *old_chain;
2530 struct execution_control_state ecss;
2531 struct execution_control_state *ecs = &ecss;
2532
2533 memset (ecs, 0, sizeof (*ecs));
2534
2535 old_chain = make_cleanup_restore_current_thread ();
2536
2537 overlay_cache_invalid = 1;
2538 /* Flush target cache before starting to handle each event.
2539 Target was running and cache could be stale. This is just a
2540 heuristic. Running threads may modify target memory, but we
2541 don't get any event. */
2542 target_dcache_invalidate ();
2543
2544 /* Go through handle_inferior_event/normal_stop, so we always
2545 have consistent output as if the stop event had been
2546 reported. */
2547 ecs->ptid = info->ptid;
2548 ecs->event_thread = find_thread_ptid (info->ptid);
2549 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2550 ecs->ws.value.sig = GDB_SIGNAL_0;
2551
2552 handle_inferior_event (ecs);
2553
2554 if (!ecs->wait_some_more)
2555 {
2556 struct thread_info *tp;
2557
2558 normal_stop ();
2559
2560 /* Finish off the continuations. */
2561 tp = inferior_thread ();
2562 do_all_intermediate_continuations_thread (tp, 1);
2563 do_all_continuations_thread (tp, 1);
2564 }
2565
2566 do_cleanups (old_chain);
2567 }
2568
2569 return 0;
2570 }
2571
2572 /* This function is attached as a "thread_stop_requested" observer.
2573    Clean up local state that assumed the PTID was to be resumed, and
2574 report the stop to the frontend. */
2575
2576 static void
2577 infrun_thread_stop_requested (ptid_t ptid)
2578 {
2579 struct displaced_step_inferior_state *displaced;
2580
2581 /* PTID was requested to stop. Remove it from the displaced
2582 stepping queue, so we don't try to resume it automatically. */
2583
2584 for (displaced = displaced_step_inferior_states;
2585 displaced;
2586 displaced = displaced->next)
2587 {
2588 struct displaced_step_request *it, **prev_next_p;
2589
2590 it = displaced->step_request_queue;
2591 prev_next_p = &displaced->step_request_queue;
2592 while (it)
2593 {
2594 if (ptid_match (it->ptid, ptid))
2595 {
2596 *prev_next_p = it->next;
2597 it->next = NULL;
2598 xfree (it);
2599 }
2600 else
2601 {
2602 prev_next_p = &it->next;
2603 }
2604
2605 it = *prev_next_p;
2606 }
2607 }
2608
2609 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2610 }
2611
2612 static void
2613 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2614 {
2615 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2616 nullify_last_target_wait_ptid ();
2617 }
2618
2619 /* Callback for iterate_over_threads. */
2620
2621 static int
2622 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2623 {
2624 if (is_exited (info->ptid))
2625 return 0;
2626
2627 delete_step_resume_breakpoint (info);
2628 delete_exception_resume_breakpoint (info);
2629 return 0;
2630 }
2631
2632 /* In all-stop, delete the step resume breakpoint of any thread that
2633 had one. In non-stop, delete the step resume breakpoint of the
2634 thread that just stopped. */
2635
2636 static void
2637 delete_step_thread_step_resume_breakpoint (void)
2638 {
2639 if (!target_has_execution
2640 || ptid_equal (inferior_ptid, null_ptid))
2641 /* If the inferior has exited, we have already deleted the step
2642 resume breakpoints out of GDB's lists. */
2643 return;
2644
2645 if (non_stop)
2646 {
2647 /* If in non-stop mode, only delete the step-resume or
2648 longjmp-resume breakpoint of the thread that just stopped
2649 stepping. */
2650 struct thread_info *tp = inferior_thread ();
2651
2652 delete_step_resume_breakpoint (tp);
2653 delete_exception_resume_breakpoint (tp);
2654 }
2655 else
2656 /* In all-stop mode, delete all step-resume and longjmp-resume
2657 breakpoints of any thread that had them. */
2658 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2659 }
2660
2661 /* A cleanup wrapper. */
2662
2663 static void
2664 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2665 {
2666 delete_step_thread_step_resume_breakpoint ();
2667 }
2668
2669 /* Pretty print the results of target_wait, for debugging purposes. */
2670
2671 static void
2672 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2673 const struct target_waitstatus *ws)
2674 {
2675 char *status_string = target_waitstatus_to_string (ws);
2676 struct ui_file *tmp_stream = mem_fileopen ();
2677 char *text;
2678
2679 /* The text is split over several lines because it was getting too long.
2680 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2681 output as a unit; we want only one timestamp printed if debug_timestamp
2682 is set. */
2683
2684 fprintf_unfiltered (tmp_stream,
2685 "infrun: target_wait (%d", ptid_get_pid (waiton_ptid));
2686 if (ptid_get_pid (waiton_ptid) != -1)
2687 fprintf_unfiltered (tmp_stream,
2688 " [%s]", target_pid_to_str (waiton_ptid));
2689 fprintf_unfiltered (tmp_stream, ", status) =\n");
2690 fprintf_unfiltered (tmp_stream,
2691 "infrun: %d [%s],\n",
2692 ptid_get_pid (result_ptid),
2693 target_pid_to_str (result_ptid));
2694 fprintf_unfiltered (tmp_stream,
2695 "infrun: %s\n",
2696 status_string);
2697
2698 text = ui_file_xstrdup (tmp_stream, NULL);
2699
2700 /* This uses %s in part to handle %'s in the text, but also to avoid
2701 a gcc error: the format attribute requires a string literal. */
2702 fprintf_unfiltered (gdb_stdlog, "%s", text);
2703
2704 xfree (status_string);
2705 xfree (text);
2706 ui_file_delete (tmp_stream);
2707 }
2708
2709 /* Prepare and stabilize the inferior for detaching it. E.g.,
2710 detaching while a thread is displaced stepping is a recipe for
2711 crashing it, as nothing would readjust the PC out of the scratch
2712 pad. */
2713
2714 void
2715 prepare_for_detach (void)
2716 {
2717 struct inferior *inf = current_inferior ();
2718 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2719 struct cleanup *old_chain_1;
2720 struct displaced_step_inferior_state *displaced;
2721
2722 displaced = get_displaced_stepping_state (inf->pid);
2723
2724 /* Is any thread of this process displaced stepping? If not,
2725 there's nothing else to do. */
2726 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2727 return;
2728
2729 if (debug_infrun)
2730 fprintf_unfiltered (gdb_stdlog,
2731 "displaced-stepping in-process while detaching");
2732
2733 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2734 inf->detaching = 1;
2735
2736 while (!ptid_equal (displaced->step_ptid, null_ptid))
2737 {
2738 struct cleanup *old_chain_2;
2739 struct execution_control_state ecss;
2740 struct execution_control_state *ecs;
2741
2742 ecs = &ecss;
2743 memset (ecs, 0, sizeof (*ecs));
2744
2745 overlay_cache_invalid = 1;
2746 /* Flush target cache before starting to handle each event.
2747 Target was running and cache could be stale. This is just a
2748 heuristic. Running threads may modify target memory, but we
2749 don't get any event. */
2750 target_dcache_invalidate ();
2751
2752 if (deprecated_target_wait_hook)
2753 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2754 else
2755 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2756
2757 if (debug_infrun)
2758 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2759
2760 /* If an error happens while handling the event, propagate GDB's
2761 knowledge of the executing state to the frontend/user running
2762 state. */
2763 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2764 &minus_one_ptid);
2765
2766       /* Now figure out what to do with the result of the wait.  */
2767 handle_inferior_event (ecs);
2768
2769 /* No error, don't finish the state yet. */
2770 discard_cleanups (old_chain_2);
2771
2772 /* Breakpoints and watchpoints are not installed on the target
2773 at this point, and signals are passed directly to the
2774 inferior, so this must mean the process is gone. */
2775 if (!ecs->wait_some_more)
2776 {
2777 discard_cleanups (old_chain_1);
2778 error (_("Program exited while detaching"));
2779 }
2780 }
2781
2782 discard_cleanups (old_chain_1);
2783 }
2784
2785 /* Wait for control to return from inferior to debugger.
2786
2787 If inferior gets a signal, we may decide to start it up again
2788 instead of returning. That is why there is a loop in this function.
2789 When this function actually returns it means the inferior
2790 should be left stopped and GDB should read more commands. */
2791
2792 void
2793 wait_for_inferior (void)
2794 {
2795 struct cleanup *old_cleanups;
2796
2797 if (debug_infrun)
2798 fprintf_unfiltered
2799 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2800
2801 old_cleanups =
2802 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2803
2804 while (1)
2805 {
2806 struct execution_control_state ecss;
2807 struct execution_control_state *ecs = &ecss;
2808 struct cleanup *old_chain;
2809
2810 memset (ecs, 0, sizeof (*ecs));
2811
2812 overlay_cache_invalid = 1;
2813
2814 /* Flush target cache before starting to handle each event.
2815 Target was running and cache could be stale. This is just a
2816 heuristic. Running threads may modify target memory, but we
2817 don't get any event. */
2818 target_dcache_invalidate ();
2819
2820 if (deprecated_target_wait_hook)
2821 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2822 else
2823 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2824
2825 if (debug_infrun)
2826 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2827
2828 /* If an error happens while handling the event, propagate GDB's
2829 knowledge of the executing state to the frontend/user running
2830 state. */
2831 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2832
2833       /* Now figure out what to do with the result of the wait.  */
2834 handle_inferior_event (ecs);
2835
2836 /* No error, don't finish the state yet. */
2837 discard_cleanups (old_chain);
2838
2839 if (!ecs->wait_some_more)
2840 break;
2841 }
2842
2843 do_cleanups (old_cleanups);
2844 }
2845
2846 /* Asynchronous version of wait_for_inferior. It is called by the
2847 event loop whenever a change of state is detected on the file
2848 descriptor corresponding to the target. It can be called more than
2849 once to complete a single execution command. In such cases we need
2850 to keep the state in a global variable ECSS. If it is the last time
2851 that this function is called for a single execution command, then
2852 report to the user that the inferior has stopped, and do the
2853 necessary cleanups. */
2854
2855 void
2856 fetch_inferior_event (void *client_data)
2857 {
2858 struct execution_control_state ecss;
2859 struct execution_control_state *ecs = &ecss;
2860 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2861 struct cleanup *ts_old_chain;
2862 int was_sync = sync_execution;
2863 int cmd_done = 0;
2864
2865 memset (ecs, 0, sizeof (*ecs));
2866
2867 /* We're handling a live event, so make sure we're doing live
2868 debugging. If we're looking at traceframes while the target is
2869 running, we're going to need to get back to that mode after
2870 handling the event. */
2871 if (non_stop)
2872 {
2873 make_cleanup_restore_current_traceframe ();
2874 set_current_traceframe (-1);
2875 }
2876
2877 if (non_stop)
2878 /* In non-stop mode, the user/frontend should not notice a thread
2879      switch due to internal events.  Make sure we revert to the
2880 user selected thread and frame after handling the event and
2881 running any breakpoint commands. */
2882 make_cleanup_restore_current_thread ();
2883
2884 overlay_cache_invalid = 1;
2885 /* Flush target cache before starting to handle each event. Target
2886 was running and cache could be stale. This is just a heuristic.
2887 Running threads may modify target memory, but we don't get any
2888 event. */
2889 target_dcache_invalidate ();
2890
2891 make_cleanup_restore_integer (&execution_direction);
2892 execution_direction = target_execution_direction ();
2893
2894 if (deprecated_target_wait_hook)
2895 ecs->ptid =
2896 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2897 else
2898 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2899
2900 if (debug_infrun)
2901 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2902
2903 /* If an error happens while handling the event, propagate GDB's
2904 knowledge of the executing state to the frontend/user running
2905 state. */
2906 if (!non_stop)
2907 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2908 else
2909 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2910
2911   /* This gets executed before the make_cleanup_restore_current_thread above,
2912      so that it still applies to the thread which has thrown the exception.  */
2913 make_bpstat_clear_actions_cleanup ();
2914
2915   /* Now figure out what to do with the result of the wait.  */
2916 handle_inferior_event (ecs);
2917
2918 if (!ecs->wait_some_more)
2919 {
2920 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2921
2922 delete_step_thread_step_resume_breakpoint ();
2923
2924 /* We may not find an inferior if this was a process exit. */
2925 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2926 normal_stop ();
2927
2928 if (target_has_execution
2929 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2930 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2931 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2932 && ecs->event_thread->step_multi
2933 && ecs->event_thread->control.stop_step)
2934 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2935 else
2936 {
2937 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2938 cmd_done = 1;
2939 }
2940 }
2941
2942 /* No error, don't finish the thread states yet. */
2943 discard_cleanups (ts_old_chain);
2944
2945 /* Revert thread and frame. */
2946 do_cleanups (old_chain);
2947
2948 /* If the inferior was in sync execution mode, and now isn't,
2949 restore the prompt (a synchronous execution command has finished,
2950 and we're ready for input). */
2951 if (interpreter_async && was_sync && !sync_execution)
2952 observer_notify_sync_execution_done ();
2953
2954 if (cmd_done
2955 && !was_sync
2956 && exec_done_display_p
2957 && (ptid_equal (inferior_ptid, null_ptid)
2958 || !is_running (inferior_ptid)))
2959 printf_unfiltered (_("completed.\n"));
2960 }
2961
2962 /* Record the frame and location we're currently stepping through. */
2963 void
2964 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2965 {
2966 struct thread_info *tp = inferior_thread ();
2967
2968 tp->control.step_frame_id = get_frame_id (frame);
2969 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2970
2971 tp->current_symtab = sal.symtab;
2972 tp->current_line = sal.line;
2973 }
2974
2975 /* Clear context switchable stepping state. */
2976
2977 void
2978 init_thread_stepping_state (struct thread_info *tss)
2979 {
2980 tss->stepping_over_breakpoint = 0;
2981 tss->step_after_step_resume_breakpoint = 0;
2982 }
2983
2984 /* Set the cached copy of the last ptid/waitstatus. */
2985
2986 static void
2987 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
2988 {
2989 target_last_wait_ptid = ptid;
2990 target_last_waitstatus = status;
2991 }
2992
2993 /* Return the cached copy of the last pid/waitstatus returned by
2994 target_wait()/deprecated_target_wait_hook(). The data is actually
2995 cached by handle_inferior_event(), which gets called immediately
2996 after target_wait()/deprecated_target_wait_hook(). */
2997
2998 void
2999 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
3000 {
3001 *ptidp = target_last_wait_ptid;
3002 *status = target_last_waitstatus;
3003 }
3004
3005 void
3006 nullify_last_target_wait_ptid (void)
3007 {
3008 target_last_wait_ptid = minus_one_ptid;
3009 }
3010
3011 /* Switch thread contexts. */
3012
3013 static void
3014 context_switch (ptid_t ptid)
3015 {
3016 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
3017 {
3018 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
3019 target_pid_to_str (inferior_ptid));
3020 fprintf_unfiltered (gdb_stdlog, "to %s\n",
3021 target_pid_to_str (ptid));
3022 }
3023
3024 switch_to_thread (ptid);
3025 }
3026
3027 static void
3028 adjust_pc_after_break (struct execution_control_state *ecs)
3029 {
3030 struct regcache *regcache;
3031 struct gdbarch *gdbarch;
3032 struct address_space *aspace;
3033 CORE_ADDR breakpoint_pc, decr_pc;
3034
3035 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
3036 we aren't, just return.
3037
3038 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
3039 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
3040 implemented by software breakpoints should be handled through the normal
3041 breakpoint layer.
3042
3043 NOTE drow/2004-01-31: On some targets, breakpoints may generate
3044 different signals (SIGILL or SIGEMT for instance), but it is less
3045 clear where the PC is pointing afterwards. It may not match
3046 gdbarch_decr_pc_after_break. I don't know any specific target that
3047 generates these signals at breakpoints (the code has been in GDB since at
3048 least 1992) so I can not guess how to handle them here.
3049
3050 In earlier versions of GDB, a target with
3051 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
3052 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
3053 target with both of these set in GDB history, and it seems unlikely to be
3054 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
3055
3056 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
3057 return;
3058
3059 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
3060 return;
3061
3062 /* In reverse execution, when a breakpoint is hit, the instruction
3063 under it has already been de-executed. The reported PC always
3064 points at the breakpoint address, so adjusting it further would
3065 be wrong. E.g., consider this case on a decr_pc_after_break == 1
3066 architecture:
3067
3068 B1 0x08000000 : INSN1
3069 B2 0x08000001 : INSN2
3070 0x08000002 : INSN3
3071 PC -> 0x08000003 : INSN4
3072
3073 Say you're stopped at 0x08000003 as above. Reverse continuing
3074 from that point should hit B2 as below. Reading the PC when the
3075 SIGTRAP is reported should read 0x08000001 and INSN2 should have
3076 been de-executed already.
3077
3078 B1 0x08000000 : INSN1
3079 B2 PC -> 0x08000001 : INSN2
3080 0x08000002 : INSN3
3081 0x08000003 : INSN4
3082
3083 We can't apply the same logic as for forward execution, because
3084 we would wrongly adjust the PC to 0x08000000, since there's a
3085 breakpoint at PC - 1. We'd then report a hit on B1, although
3086 INSN1 hadn't been de-executed yet. Doing nothing is the correct
3087 behaviour. */
3088 if (execution_direction == EXEC_REVERSE)
3089 return;
3090
3091 /* If this target does not decrement the PC after breakpoints, then
3092 we have nothing to do. */
3093 regcache = get_thread_regcache (ecs->ptid);
3094 gdbarch = get_regcache_arch (regcache);
3095
3096 decr_pc = target_decr_pc_after_break (gdbarch);
3097 if (decr_pc == 0)
3098 return;
3099
3100 aspace = get_regcache_aspace (regcache);
3101
3102 /* Find the location where (if we've hit a breakpoint) the
3103 breakpoint would be. */
3104 breakpoint_pc = regcache_read_pc (regcache) - decr_pc;
3105
3106 /* Check whether there actually is a software breakpoint inserted at
3107 that location.
3108
3109 If in non-stop mode, a race condition is possible where we've
3110 removed a breakpoint, but stop events for that breakpoint were
3111 already queued and arrive later. To suppress those spurious
3112 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
3113 and retire them after a number of stop events are reported. */
3114 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
3115 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
3116 {
3117 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
3118
3119 if (record_full_is_used ())
3120 record_full_gdb_operation_disable_set ();
3121
3122 /* When using hardware single-step, a SIGTRAP is reported for both
3123 a completed single-step and a software breakpoint. Need to
3124 differentiate between the two, as the latter needs adjusting
3125 but the former does not.
3126
3127 The SIGTRAP can be due to a completed hardware single-step only if
3128 - we didn't insert software single-step breakpoints
3129 - the thread to be examined is still the current thread
3130 - this thread is currently being stepped
3131
3132 If any of these events did not occur, we must have stopped due
3133 to hitting a software breakpoint, and have to back up to the
3134 breakpoint address.
3135
3136 As a special case, we could have hardware single-stepped a
3137 software breakpoint. In this case (prev_pc == breakpoint_pc),
3138 we also need to back up to the breakpoint address. */
3139
3140 if (singlestep_breakpoints_inserted_p
3141 || !ptid_equal (ecs->ptid, inferior_ptid)
3142 || !currently_stepping (ecs->event_thread)
3143 || ecs->event_thread->prev_pc == breakpoint_pc)
3144 regcache_write_pc (regcache, breakpoint_pc);
3145
3146 do_cleanups (old_cleanups);
3147 }
3148 }
3149
3150 static void
3151 init_infwait_state (void)
3152 {
3153 waiton_ptid = pid_to_ptid (-1);
3154 infwait_state = infwait_normal_state;
3155 }
3156
3157 static int
3158 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3159 {
3160 for (frame = get_prev_frame (frame);
3161 frame != NULL;
3162 frame = get_prev_frame (frame))
3163 {
3164 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3165 return 1;
3166 if (get_frame_type (frame) != INLINE_FRAME)
3167 break;
3168 }
3169
3170 return 0;
3171 }
3172
3173 /* Auxiliary function that handles syscall entry/return events.
3174 It returns 1 if the inferior should keep going (and GDB
3175 should ignore the event), or 0 if the event deserves to be
3176 processed. */
3177
3178 static int
3179 handle_syscall_event (struct execution_control_state *ecs)
3180 {
3181 struct regcache *regcache;
3182 int syscall_number;
3183
3184 if (!ptid_equal (ecs->ptid, inferior_ptid))
3185 context_switch (ecs->ptid);
3186
3187 regcache = get_thread_regcache (ecs->ptid);
3188 syscall_number = ecs->ws.value.syscall_number;
3189 stop_pc = regcache_read_pc (regcache);
3190
3191 if (catch_syscall_enabled () > 0
3192 && catching_syscall_number (syscall_number) > 0)
3193 {
3194 if (debug_infrun)
3195 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3196 syscall_number);
3197
3198 ecs->event_thread->control.stop_bpstat
3199 = bpstat_stop_status (get_regcache_aspace (regcache),
3200 stop_pc, ecs->ptid, &ecs->ws);
3201
3202 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3203 {
3204 /* Catchpoint hit. */
3205 return 0;
3206 }
3207 }
3208
3209 /* If no catchpoint triggered for this, then keep going. */
3210 keep_going (ecs);
3211 return 1;
3212 }
3213
3214 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3215
3216 static void
3217 fill_in_stop_func (struct gdbarch *gdbarch,
3218 struct execution_control_state *ecs)
3219 {
3220 if (!ecs->stop_func_filled_in)
3221 {
3222 /* Don't care about return value; stop_func_start and stop_func_name
3223 will both be 0 if it doesn't work. */
3224 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3225 &ecs->stop_func_start, &ecs->stop_func_end);
3226 ecs->stop_func_start
3227 += gdbarch_deprecated_function_start_offset (gdbarch);
3228
3229 if (gdbarch_skip_entrypoint_p (gdbarch))
3230 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3231 ecs->stop_func_start);
3232
3233 ecs->stop_func_filled_in = 1;
3234 }
3235 }
3236
3237
3238 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3239
3240 static enum stop_kind
3241 get_inferior_stop_soon (ptid_t ptid)
3242 {
3243 struct inferior *inf = find_inferior_pid (ptid_get_pid (ptid));
3244
3245 gdb_assert (inf != NULL);
3246 return inf->control.stop_soon;
3247 }
3248
3249 /* Given an execution control state that has been freshly filled in by
3250 an event from the inferior, figure out what it means and take
3251 appropriate action.
3252
3253 The alternatives are:
3254
3255 1) stop_stepping and return; to really stop and return to the
3256 debugger.
3257
3258 2) keep_going and return; to wait for the next event (set
3259 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3260 once). */
3261
3262 static void
3263 handle_inferior_event (struct execution_control_state *ecs)
3264 {
3265 enum stop_kind stop_soon;
3266
3267 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3268 {
3269 /* We had an event in the inferior, but we are not interested in
3270 handling it at this level. The lower layers have already
3271 done what needs to be done, if anything.
3272
3273 One of the possible circumstances for this is when the
3274 inferior produces output for the console. The inferior has
3275 not stopped, and we are ignoring the event. Another possible
3276 circumstance is any event which the lower level knows will be
3277 reported multiple times without an intervening resume. */
3278 if (debug_infrun)
3279 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3280 prepare_to_wait (ecs);
3281 return;
3282 }
3283
3284 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3285 && target_can_async_p () && !sync_execution)
3286 {
3287 /* There were no unwaited-for children left in the target, but,
3288       /* There were no unwaited-for children left in the target, but
3289 ignore. Otherwise, if we were running a synchronous
3290 execution command, we need to cancel it and give the user
3291 back the terminal. */
3292 if (debug_infrun)
3293 fprintf_unfiltered (gdb_stdlog,
3294 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3295 prepare_to_wait (ecs);
3296 return;
3297 }
3298
3299 /* Cache the last pid/waitstatus. */
3300 set_last_target_status (ecs->ptid, ecs->ws);
3301
3302 /* Always clear state belonging to the previous time we stopped. */
3303 stop_stack_dummy = STOP_NONE;
3304
3305 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3306 {
3307 /* No unwaited-for children left. IOW, all resumed children
3308 have exited. */
3309 if (debug_infrun)
3310 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3311
3312 stop_print_frame = 0;
3313 stop_stepping (ecs);
3314 return;
3315 }
3316
3317 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3318 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3319 {
3320 ecs->event_thread = find_thread_ptid (ecs->ptid);
3321 /* If it's a new thread, add it to the thread database. */
3322 if (ecs->event_thread == NULL)
3323 ecs->event_thread = add_thread (ecs->ptid);
3324
3325 /* Disable range stepping. If the next step request could use a
3326 range, this will end up re-enabled then. */
3327 ecs->event_thread->control.may_range_step = 0;
3328 }
3329
3330 /* Dependent on valid ECS->EVENT_THREAD. */
3331 adjust_pc_after_break (ecs);
3332
3333 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3334 reinit_frame_cache ();
3335
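/* Let the breakpoints module retire moribund breakpoint locations that
   are old enough, now that another event has arrived.  */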
3336 breakpoint_retire_moribund ();
3337
3338 /* First, distinguish signals caused by the debugger from signals
3339 that have to do with the program's own actions. Note that
3340 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3341 on the operating system version. Here we detect when a SIGILL or
3342 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3343 something similar for SIGSEGV, since a SIGSEGV will be generated
3344 when we're trying to execute a breakpoint instruction on a
3345 non-executable stack. This happens for call dummy breakpoints
3346 for architectures like SPARC that place call dummies on the
3347 stack. */
3348 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3349 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3350 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3351 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3352 {
3353 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3354
3355 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3356 regcache_read_pc (regcache)))
3357 {
3358 if (debug_infrun)
3359 fprintf_unfiltered (gdb_stdlog,
3360 "infrun: Treating signal as SIGTRAP\n");
3361 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3362 }
3363 }
3364
3365 /* Mark the non-executing threads accordingly. In all-stop, all
3366 threads of all processes are stopped when we get any event
3367 reported. In non-stop mode, only the event thread stops. If
3368 we're handling a process exit in non-stop mode, there's nothing
3369 to do, as threads of the dead process are gone, and threads of
3370 any other process were left running. */
3371 if (!non_stop)
3372 set_executing (minus_one_ptid, 0);
3373 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3374 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3375 set_executing (ecs->ptid, 0);
3376
3377 switch (infwait_state)
3378 {
3379 case infwait_normal_state:
3380 if (debug_infrun)
3381 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3382 break;
3383
3384 case infwait_step_watch_state:
3385 if (debug_infrun)
3386 fprintf_unfiltered (gdb_stdlog,
3387 "infrun: infwait_step_watch_state\n");
3388
3389 ecs->stepped_after_stopped_by_watchpoint = 1;
3390 break;
3391
3392 case infwait_nonstep_watch_state:
3393 if (debug_infrun)
3394 fprintf_unfiltered (gdb_stdlog,
3395 "infrun: infwait_nonstep_watch_state\n");
3396 insert_breakpoints ();
3397
3398 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3399 handle things like signals arriving and other things happening
3400 in combination correctly? */
3401 ecs->stepped_after_stopped_by_watchpoint = 1;
3402 break;
3403
3404 default:
3405 internal_error (__FILE__, __LINE__, _("bad switch"));
3406 }
3407
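/* Whatever special wait state we were in has served its purpose; reset
   to the default before dispatching on the event kind below.  */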
3408 infwait_state = infwait_normal_state;
3409 waiton_ptid = pid_to_ptid (-1);
3410
3411 switch (ecs->ws.kind)
3412 {
3413 case TARGET_WAITKIND_LOADED:
3414 if (debug_infrun)
3415 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3416 if (!ptid_equal (ecs->ptid, inferior_ptid))
3417 context_switch (ecs->ptid);
3418 /* Ignore gracefully during startup of the inferior, as it might
3419 be the shell which has just loaded some objects; otherwise
3420 add the symbols for the newly loaded objects. Also ignore at
3421 the beginning of an attach or remote session; we will query
3422 the full list of libraries once the connection is
3423 established. */
3424
3425 stop_soon = get_inferior_stop_soon (ecs->ptid);
3426 if (stop_soon == NO_STOP_QUIETLY)
3427 {
3428 struct regcache *regcache;
3429
3430 regcache = get_thread_regcache (ecs->ptid);
3431
3432 handle_solib_event ();
3433
3434 ecs->event_thread->control.stop_bpstat
3435 = bpstat_stop_status (get_regcache_aspace (regcache),
3436 stop_pc, ecs->ptid, &ecs->ws);
3437
3438 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3439 {
3440 /* A catchpoint triggered. */
3441 process_event_stop_test (ecs);
3442 return;
3443 }
3444
3445 /* If requested, stop when the dynamic linker notifies
3446 gdb of events. This allows the user to get control
3447 and place breakpoints in initializer routines for
3448 dynamically loaded objects (among other things). */
3449 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3450 if (stop_on_solib_events)
3451 {
3452 /* Make sure we print "Stopped due to solib-event" in
3453 normal_stop. */
3454 stop_print_frame = 1;
3455
3456 stop_stepping (ecs);
3457 return;
3458 }
3459 }
3460
3461 /* If we are skipping through a shell, or through shared library
3462 loading that we aren't interested in, resume the program. If
3463 we're running the program normally, also resume. */
3464 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3465 {
3466 /* Loading of shared libraries might have changed breakpoint
3467 addresses. Make sure new breakpoints are inserted. */
3468 if (stop_soon == NO_STOP_QUIETLY
3469 && !breakpoints_always_inserted_mode ())
3470 insert_breakpoints ();
3471 resume (0, GDB_SIGNAL_0);
3472 prepare_to_wait (ecs);
3473 return;
3474 }
3475
3476 /* But stop if we're attaching or setting up a remote
3477 connection. */
3478 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3479 || stop_soon == STOP_QUIETLY_REMOTE)
3480 {
3481 if (debug_infrun)
3482 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3483 stop_stepping (ecs);
3484 return;
3485 }
3486
3487 internal_error (__FILE__, __LINE__,
3488 _("unhandled stop_soon: %d"), (int) stop_soon);
3489
3490 case TARGET_WAITKIND_SPURIOUS:
3491 if (debug_infrun)
3492 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3493 if (!ptid_equal (ecs->ptid, inferior_ptid))
3494 context_switch (ecs->ptid);
3495 resume (0, GDB_SIGNAL_0);
3496 prepare_to_wait (ecs);
3497 return;
3498
3499 case TARGET_WAITKIND_EXITED:
3500 case TARGET_WAITKIND_SIGNALLED:
3501 if (debug_infrun)
3502 {
3503 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3504 fprintf_unfiltered (gdb_stdlog,
3505 "infrun: TARGET_WAITKIND_EXITED\n");
3506 else
3507 fprintf_unfiltered (gdb_stdlog,
3508 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3509 }
3510
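/* Make the process that exited (or was signalled) the current one,
   since the bookkeeping below operates on the current inferior.  */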
3511 inferior_ptid = ecs->ptid;
3512 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3513 set_current_program_space (current_inferior ()->pspace);
3514 handle_vfork_child_exec_or_exit (0);
3515 target_terminal_ours (); /* Must do this before mourn anyway. */
3516
3517 /* Clear any previous state of convenience variables. */
3518 clear_exit_convenience_vars ();
3519
3520 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3521 {
3522 /* Record the exit code in the convenience variable $_exitcode, so
3523 that the user can inspect this again later. */
3524 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3525 (LONGEST) ecs->ws.value.integer);
3526
3527 /* Also record this in the inferior itself. */
3528 current_inferior ()->has_exit_code = 1;
3529 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3530
3531 /* Support the --return-child-result option. */
3532 return_child_result_value = ecs->ws.value.integer;
3533
3534 observer_notify_exited (ecs->ws.value.integer);
3535 }
3536 else
3537 {
3538 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3539 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3540
3541 if (gdbarch_gdb_signal_to_target_p (gdbarch))
3542 {
3543 /* Set the value of the internal variable $_exitsignal,
3544 which holds the signal uncaught by the inferior. */
3545 set_internalvar_integer (lookup_internalvar ("_exitsignal"),
3546 gdbarch_gdb_signal_to_target (gdbarch,
3547 ecs->ws.value.sig));
3548 }
3549 else
3550 {
3551 /* We don't have access to the target's method used for
3552 converting between signal numbers (GDB's internal
3553 representation <-> target's representation).
3554 Therefore, we cannot do a good job at displaying this
3555 information to the user. It's better to just warn
3556 her about it (if infrun debugging is enabled), and
3557 give up. */
3558 if (debug_infrun)
3559 fprintf_filtered (gdb_stdlog, _("\
3560 Cannot fill $_exitsignal with the correct signal number.\n"));
3561 }
3562
3563 observer_notify_signal_exited (ecs->ws.value.sig);
3564 }
3565
3566 gdb_flush (gdb_stdout);
3567 target_mourn_inferior ();
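/* The inferior is gone; discard any single-step breakpoint bookkeeping
   that was still pending.  */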
3568 singlestep_breakpoints_inserted_p = 0;
3569 cancel_single_step_breakpoints ();
3570 stop_print_frame = 0;
3571 stop_stepping (ecs);
3572 return;
3573
3574 /* The following are the only cases in which we keep going;
3575 the above cases end in a continue or goto. */
3576 case TARGET_WAITKIND_FORKED:
3577 case TARGET_WAITKIND_VFORKED:
3578 if (debug_infrun)
3579 {
3580 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3581 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3582 else
3583 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3584 }
3585
3586 /* Check whether the inferior is displaced stepping. */
3587 {
3588 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3589 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3590 struct displaced_step_inferior_state *displaced
3591 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3592
3593 /* If displaced stepping is supported, and thread ecs->ptid is
3594 displaced stepping, fix up its state before following the fork. */
3595 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3596 {
3597 struct inferior *parent_inf
3598 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3599 struct regcache *child_regcache;
3600 CORE_ADDR parent_pc;
3601
3602 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3603 indicating that the displaced stepping of syscall instruction
3604 has been done. Perform cleanup for parent process here. Note
3605 that this operation also cleans up the child process for vfork,
3606 because their pages are shared. */
3607 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3608
3609 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3610 {
3611 /* Restore scratch pad for child process. */
3612 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3613 }
3614
3615 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3616 the child's PC is also within the scratchpad. Set the child's PC
3617 to the parent's PC value, which has already been fixed up.
3618 FIXME: we use the parent's aspace here, although we're touching
3619 the child, because the child hasn't been added to the inferior
3620 list yet at this point. */
3621
3622 child_regcache
3623 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3624 gdbarch,
3625 parent_inf->aspace);
3626 /* Read PC value of parent process. */
3627 parent_pc = regcache_read_pc (regcache);
3628
3629 if (debug_displaced)
3630 fprintf_unfiltered (gdb_stdlog,
3631 "displaced: write child pc from %s to %s\n",
3632 paddress (gdbarch,
3633 regcache_read_pc (child_regcache)),
3634 paddress (gdbarch, parent_pc));
3635
3636 regcache_write_pc (child_regcache, parent_pc);
3637 }
3638 }
3639
3640 if (!ptid_equal (ecs->ptid, inferior_ptid))
3641 context_switch (ecs->ptid);
3642
3643 /* Immediately detach breakpoints from the child before there's
3644 any chance of letting the user delete breakpoints from the
3645 breakpoint lists. If we don't do this early, it's easy to
3646 leave left over traps in the child, vis: "break foo; catch
3647 fork; c; <fork>; del; c; <child calls foo>". We only follow
3648 the fork on the last `continue', and by that time the
3649 breakpoint at "foo" is long gone from the breakpoint table.
3650 If we vforked, then we don't need to unpatch here, since both
3651 parent and child are sharing the same memory pages; we'll
3652 need to unpatch at follow/detach time instead to be certain
3653 that new breakpoints added between catchpoint hit time and
3654 vfork follow are detached. */
3655 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3656 {
3657 /* This won't actually modify the breakpoint list, but will
3658 physically remove the breakpoints from the child. */
3659 detach_breakpoints (ecs->ws.value.related_pid);
3660 }
3661
3662 if (singlestep_breakpoints_inserted_p)
3663 {
3664 /* Pull the single step breakpoints out of the target. */
3665 remove_single_step_breakpoints ();
3666 singlestep_breakpoints_inserted_p = 0;
3667 }
3668
3669 /* In case the event is caught by a catchpoint, remember that
3670 the event is to be followed at the next resume of the thread,
3671 and not immediately. */
3672 ecs->event_thread->pending_follow = ecs->ws;
3673
3674 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3675
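/* See whether a fork or vfork catchpoint wants to report this event.  */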
3676 ecs->event_thread->control.stop_bpstat
3677 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3678 stop_pc, ecs->ptid, &ecs->ws);
3679
3680 /* If no catchpoint triggered for this, then keep going. Note
3681 that we're interested in knowing the bpstat actually causes a
3682 stop, not just if it may explain the signal. Software
3683 watchpoints, for example, always appear in the bpstat. */
3684 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3685 {
3686 ptid_t parent;
3687 ptid_t child;
3688 int should_resume;
3689 int follow_child
3690 = (follow_fork_mode_string == follow_fork_mode_child);
3691
3692 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3693
3694 should_resume = follow_fork ();
3695
3696 parent = ecs->ptid;
3697 child = ecs->ws.value.related_pid;
3698
3699 /* In non-stop mode, also resume the other branch. */
3700 if (non_stop && !detach_fork)
3701 {
3702 if (follow_child)
3703 switch_to_thread (parent);
3704 else
3705 switch_to_thread (child);
3706
3707 ecs->event_thread = inferior_thread ();
3708 ecs->ptid = inferior_ptid;
3709 keep_going (ecs);
3710 }
3711
3712 if (follow_child)
3713 switch_to_thread (child);
3714 else
3715 switch_to_thread (parent);
3716
3717 ecs->event_thread = inferior_thread ();
3718 ecs->ptid = inferior_ptid;
3719
3720 if (should_resume)
3721 keep_going (ecs);
3722 else
3723 stop_stepping (ecs);
3724 return;
3725 }
3726 process_event_stop_test (ecs);
3727 return;
3728
3729 case TARGET_WAITKIND_VFORK_DONE:
3730 /* Done with the shared memory region. Re-insert breakpoints in
3731 the parent, and keep going. */
3732
3733 if (debug_infrun)
3734 fprintf_unfiltered (gdb_stdlog,
3735 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3736
3737 if (!ptid_equal (ecs->ptid, inferior_ptid))
3738 context_switch (ecs->ptid);
3739
3740 current_inferior ()->waiting_for_vfork_done = 0;
3741 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3742 /* This also takes care of reinserting breakpoints in the
3743 previously locked inferior. */
3744 keep_going (ecs);
3745 return;
3746
3747 case TARGET_WAITKIND_EXECD:
3748 if (debug_infrun)
3749 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3750
3751 if (!ptid_equal (ecs->ptid, inferior_ptid))
3752 context_switch (ecs->ptid);
3753
3754 singlestep_breakpoints_inserted_p = 0;
3755 cancel_single_step_breakpoints ();
3756
3757 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3758
3759 /* Do whatever is necessary to the parent branch of the vfork. */
3760 handle_vfork_child_exec_or_exit (1);
3761
3762 /* This causes the eventpoints and symbol table to be reset.
3763 Must do this now, before trying to determine whether to
3764 stop. */
3765 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3766
3767 ecs->event_thread->control.stop_bpstat
3768 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3769 stop_pc, ecs->ptid, &ecs->ws);
3770
3771 /* Note that this may be referenced from inside
3772 bpstat_stop_status above, through inferior_has_execd. */
3773 xfree (ecs->ws.value.execd_pathname);
3774 ecs->ws.value.execd_pathname = NULL;
3775
3776 /* If no catchpoint triggered for this, then keep going. */
3777 if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3778 {
3779 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3780 keep_going (ecs);
3781 return;
3782 }
3783 process_event_stop_test (ecs);
3784 return;
3785
3786 /* Be careful not to try to gather much state about a thread
3787 that's in a syscall. It's frequently a losing proposition. */
3788 case TARGET_WAITKIND_SYSCALL_ENTRY:
3789 if (debug_infrun)
3790 fprintf_unfiltered (gdb_stdlog,
3791 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3792 /* Getting the current syscall number. */
3793 if (handle_syscall_event (ecs) == 0)
3794 process_event_stop_test (ecs);
3795 return;
3796
3797 /* Before examining the threads further, step this thread to
3798 get it entirely out of the syscall. (We get notice of the
3799 event when the thread is just on the verge of exiting a
3800 syscall. Stepping one instruction seems to get it back
3801 into user code.) */
3802 case TARGET_WAITKIND_SYSCALL_RETURN:
3803 if (debug_infrun)
3804 fprintf_unfiltered (gdb_stdlog,
3805 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3806 if (handle_syscall_event (ecs) == 0)
3807 process_event_stop_test (ecs);
3808 return;
3809
3810 case TARGET_WAITKIND_STOPPED:
3811 if (debug_infrun)
3812 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3813 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3814 handle_signal_stop (ecs);
3815 return;
3816
3817 case TARGET_WAITKIND_NO_HISTORY:
3818 if (debug_infrun)
3819 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3820 /* Reverse execution: target ran out of history info. */
3821
3822 /* Pull the single step breakpoints out of the target. */
3823 if (singlestep_breakpoints_inserted_p)
3824 {
3825 if (!ptid_equal (ecs->ptid, inferior_ptid))
3826 context_switch (ecs->ptid);
3827 remove_single_step_breakpoints ();
3828 singlestep_breakpoints_inserted_p = 0;
3829 }
3830 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3831 observer_notify_no_history ();
3832 stop_stepping (ecs);
3833 return;
3834 }
3835 }
3836
3837 /* Come here when the program has stopped with a signal. */
3838
3839 static void
3840 handle_signal_stop (struct execution_control_state *ecs)
3841 {
3842 struct frame_info *frame;
3843 struct gdbarch *gdbarch;
3844 int stopped_by_watchpoint;
3845 enum stop_kind stop_soon;
3846 int random_signal;
3847
3848 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
3849
3850 /* Do we need to clean up the state of a thread that has
3851 completed a displaced single-step? (Doing so usually affects
3852 the PC, so do it here, before we set stop_pc.) */
3853 displaced_step_fixup (ecs->ptid,
3854 ecs->event_thread->suspend.stop_signal);
3855
3856 /* If we either finished a single-step or hit a breakpoint, but
3857 the user wanted this thread to be stopped, pretend we got a
3858 SIG0 (generic unsignaled stop). */
3859 if (ecs->event_thread->stop_requested
3860 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3861 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3862
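/* Record where the event thread stopped; much of the logic below keys
   off this.  */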
3863 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3864
3865 if (debug_infrun)
3866 {
3867 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3868 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3869 struct cleanup *old_chain = save_inferior_ptid ();
3870
3871 inferior_ptid = ecs->ptid;
3872
3873 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3874 paddress (gdbarch, stop_pc));
3875 if (target_stopped_by_watchpoint ())
3876 {
3877 CORE_ADDR addr;
3878
3879 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3880
3881 if (target_stopped_data_address (&current_target, &addr))
3882 fprintf_unfiltered (gdb_stdlog,
3883 "infrun: stopped data address = %s\n",
3884 paddress (gdbarch, addr));
3885 else
3886 fprintf_unfiltered (gdb_stdlog,
3887 "infrun: (no data address available)\n");
3888 }
3889
3890 do_cleanups (old_chain);
3891 }
3892
3893 /* This originates from start_remote(), start_inferior() and
3894 shared libraries hook functions. */
3895 stop_soon = get_inferior_stop_soon (ecs->ptid);
3896 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3897 {
3898 if (!ptid_equal (ecs->ptid, inferior_ptid))
3899 context_switch (ecs->ptid);
3900 if (debug_infrun)
3901 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3902 stop_print_frame = 1;
3903 stop_stepping (ecs);
3904 return;
3905 }
3906
3907 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3908 && stop_after_trap)
3909 {
3910 if (!ptid_equal (ecs->ptid, inferior_ptid))
3911 context_switch (ecs->ptid);
3912 if (debug_infrun)
3913 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3914 stop_print_frame = 0;
3915 stop_stepping (ecs);
3916 return;
3917 }
3918
3919 /* This originates from attach_command(). We need to overwrite
3920 the stop_signal here, because some kernels don't ignore a
3921 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3922 See more comments in inferior.h. On the other hand, if we
3923 get a non-SIGSTOP, report it to the user - assume the backend
3924 will handle the SIGSTOP if it should show up later.
3925
3926 Also consider that the attach is complete when we see a
3927 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3928 target extended-remote report it instead of a SIGSTOP
3929 (e.g. gdbserver). We already rely on SIGTRAP being our
3930 signal, so this is no exception.
3931
3932 Also consider that the attach is complete when we see a
3933 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3934 the target to stop all threads of the inferior, in case the
3935 low level attach operation doesn't stop them implicitly. If
3936 they weren't stopped implicitly, then the stub will report a
3937 GDB_SIGNAL_0, meaning: stopped for no particular reason
3938 other than GDB's request. */
3939 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3940 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
3941 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
3942 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
3943 {
3944 stop_print_frame = 1;
3945 stop_stepping (ecs);
3946 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3947 return;
3948 }
3949
3950 /* See if something interesting happened to the non-current thread. If
3951 so, then switch to that thread. */
3952 if (!ptid_equal (ecs->ptid, inferior_ptid))
3953 {
3954 if (debug_infrun)
3955 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3956
3957 context_switch (ecs->ptid);
3958
3959 if (deprecated_context_hook)
3960 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3961 }
3962
3963 /* At this point, get hold of the now-current thread's frame. */
3964 frame = get_current_frame ();
3965 gdbarch = get_frame_arch (frame);
3966
3967 /* Pull the single step breakpoints out of the target. */
3968 if (singlestep_breakpoints_inserted_p)
3969 {
3970 /* However, before doing so, if this single-step breakpoint was
3971 actually for another thread, set this thread up for moving
3972 past it. */
3973 if (!ptid_equal (ecs->ptid, singlestep_ptid)
3974 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3975 {
3976 struct regcache *regcache;
3977 struct address_space *aspace;
3978 CORE_ADDR pc;
3979
3980 regcache = get_thread_regcache (ecs->ptid);
3981 aspace = get_regcache_aspace (regcache);
3982 pc = regcache_read_pc (regcache);
3983 if (single_step_breakpoint_inserted_here_p (aspace, pc))
3984 {
3985 if (debug_infrun)
3986 {
3987 fprintf_unfiltered (gdb_stdlog,
3988 "infrun: [%s] hit step over single-step"
3989 " breakpoint of [%s]\n",
3990 target_pid_to_str (ecs->ptid),
3991 target_pid_to_str (singlestep_ptid));
3992 }
3993 ecs->hit_singlestep_breakpoint = 1;
3994 }
3995 }
3996
3997 remove_single_step_breakpoints ();
3998 singlestep_breakpoints_inserted_p = 0;
3999 }
4000
4001 if (ecs->stepped_after_stopped_by_watchpoint)
4002 stopped_by_watchpoint = 0;
4003 else
4004 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4005
4006 /* If necessary, step over this watchpoint. We'll be back to display
4007 it in a moment. */
4008 if (stopped_by_watchpoint
4009 && (target_have_steppable_watchpoint
4010 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4011 {
4012 /* At this point, we are stopped at an instruction which has
4013 attempted to write to a piece of memory under control of
4014 a watchpoint. The instruction hasn't actually executed
4015 yet. If we were to evaluate the watchpoint expression
4016 now, we would get the old value, and therefore no change
4017 would seem to have occurred.
4018
4019 In order to make watchpoints work `right', we really need
4020 to complete the memory write, and then evaluate the
4021 watchpoint expression. We do this by single-stepping the
4022 target.
4023
4024 It may not be necessary to disable the watchpoint to step over
4025 it. For example, the PA can (with some kernel cooperation)
4026 single step over a watchpoint without disabling the watchpoint.
4027
4028 It is far more common to need to disable a watchpoint to step
4029 the inferior over it. If we have non-steppable watchpoints,
4030 we must disable the current watchpoint; it's simplest to
4031 disable all watchpoints and breakpoints. */
4032 int hw_step = 1;
4033
4034 if (!target_have_steppable_watchpoint)
4035 {
4036 remove_breakpoints ();
4037 /* See comment in resume why we need to stop bypassing signals
4038 while breakpoints have been removed. */
4039 target_pass_signals (0, NULL);
4040 }
4041 /* Single-step over the watchpoint, using software single-step breakpoints if the architecture requires them. */
4042 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4043 target_resume (ecs->ptid, hw_step, GDB_SIGNAL_0);
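/* Remember which thread we need to wait on, and that it is stepping over
   the watchpoint; the infwait states set below are handled near the top
   of handle_inferior_event.  */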
4044 waiton_ptid = ecs->ptid;
4045 if (target_have_steppable_watchpoint)
4046 infwait_state = infwait_step_watch_state;
4047 else
4048 infwait_state = infwait_nonstep_watch_state;
4049 prepare_to_wait (ecs);
4050 return;
4051 }
4052
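/* Reset per-stop state before deciding what this stop means.  */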
4053 ecs->event_thread->stepping_over_breakpoint = 0;
4054 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4055 ecs->event_thread->control.stop_step = 0;
4056 stop_print_frame = 1;
4057 stopped_by_random_signal = 0;
4058
4059 /* Hide inlined functions starting here, unless we just performed stepi or
4060 nexti. After stepi and nexti, always show the innermost frame (not any
4061 inline function call sites). */
4062 if (ecs->event_thread->control.step_range_end != 1)
4063 {
4064 struct address_space *aspace =
4065 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4066
4067 /* skip_inline_frames is expensive, so we avoid it if we can
4068 determine that the address is one where functions cannot have
4069 been inlined. This improves performance with inferiors that
4070 load a lot of shared libraries, because the solib event
4071 breakpoint is defined as the address of a function (i.e. not
4072 inline). Note that we have to check the previous PC as well
4073 as the current one to catch cases when we have just
4074 single-stepped off a breakpoint prior to reinstating it.
4075 Note that we're assuming that the code we single-step to is
4076 not inline, but that's not definitive: there's nothing
4077 preventing the event breakpoint function from containing
4078 inlined code, and the single-step ending up there. If the
4079 user had set a breakpoint on that inlined code, the missing
4080 skip_inline_frames call would break things. Fortunately
4081 that's an extremely unlikely scenario. */
4082 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4083 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4084 && ecs->event_thread->control.trap_expected
4085 && pc_at_non_inline_function (aspace,
4086 ecs->event_thread->prev_pc,
4087 &ecs->ws)))
4088 {
4089 skip_inline_frames (ecs->ptid);
4090
4091 /* Re-fetch current thread's frame in case that invalidated
4092 the frame cache. */
4093 frame = get_current_frame ();
4094 gdbarch = get_frame_arch (frame);
4095 }
4096 }
4097
4098 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4099 && ecs->event_thread->control.trap_expected
4100 && gdbarch_single_step_through_delay_p (gdbarch)
4101 && currently_stepping (ecs->event_thread))
4102 {
4103 /* We're trying to step off a breakpoint. Turns out that we're
4104 also on an instruction that needs to be stepped multiple
4105 times before it's been fully executed. E.g., architectures
4106 with a delay slot. It needs to be stepped twice, once for
4107 the instruction and once for the delay slot. */
4108 int step_through_delay
4109 = gdbarch_single_step_through_delay (gdbarch, frame);
4110
4111 if (debug_infrun && step_through_delay)
4112 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4113 if (ecs->event_thread->control.step_range_end == 0
4114 && step_through_delay)
4115 {
4116 /* The user issued a continue when stopped at a breakpoint.
4117 Set up for another trap and get out of here. */
4118 ecs->event_thread->stepping_over_breakpoint = 1;
4119 keep_going (ecs);
4120 return;
4121 }
4122 else if (step_through_delay)
4123 {
4124 /* The user issued a step when stopped at a breakpoint.
4125 Maybe we should stop, maybe we should not - the delay
4126 slot *might* correspond to a line of source. In any
4127 case, don't decide that here, just set
4128 ecs->stepping_over_breakpoint, making sure we
4129 single-step again before breakpoints are re-inserted. */
4130 ecs->event_thread->stepping_over_breakpoint = 1;
4131 }
4132 }
4133
4134 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4135 handles this event. */
4136 ecs->event_thread->control.stop_bpstat
4137 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4138 stop_pc, ecs->ptid, &ecs->ws);
4139
4140 /* Re-set this in case a breakpoint condition evaluated above
4141 called an inferior function. */
4142 stop_print_frame = 1;
4143
4144 /* This is where we handle "moribund" watchpoints. Unlike
4145 software breakpoints traps, hardware watchpoint traps are
4146 always distinguishable from random traps. If no high-level
4147 watchpoint is associated with the reported stop data address
4148 anymore, then the bpstat does not explain the signal ---
4149 simply make sure to ignore it if `stopped_by_watchpoint' is
4150 set. */
4151
4152 if (debug_infrun
4153 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4154 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4155 GDB_SIGNAL_TRAP)
4156 && stopped_by_watchpoint)
4157 fprintf_unfiltered (gdb_stdlog,
4158 "infrun: no user watchpoint explains "
4159 "watchpoint SIGTRAP, ignoring\n");
4160
4161 /* NOTE: cagney/2003-03-29: These checks for a random signal
4162 at one stage in the past included checks for an inferior
4163 function call's call dummy's return breakpoint. The original
4164 comment, that went with the test, read:
4165
4166 ``End of a stack dummy. Some systems (e.g. Sony news) give
4167 another signal besides SIGTRAP, so check here as well as
4168 above.''
4169
4170 If someone ever tries to get call dummys on a
4171 non-executable stack to work (where the target would stop
4172 with something like a SIGSEGV), then those tests might need
4173 to be re-instated. Given, however, that the tests were only
4174 enabled when momentary breakpoints were not being used, I
4175 suspect that it won't be the case.
4176
4177 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4178 be necessary for call dummies on a non-executable stack on
4179 SPARC. */
4180
4181 /* See if the breakpoints module can explain the signal. */
4182 random_signal
4183 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4184 ecs->event_thread->suspend.stop_signal);
4185
4186 /* If not, perhaps stepping/nexting can. */
4187 if (random_signal)
4188 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4189 && currently_stepping (ecs->event_thread));
4190
4191 /* Perhaps the thread hit a single-step breakpoint of _another_
4192 thread. Single-step breakpoints are transparent to the
4193 breakpoints module. */
4194 if (random_signal)
4195 random_signal = !ecs->hit_singlestep_breakpoint;
4196
4197 /* No? Perhaps we got a moribund watchpoint. */
4198 if (random_signal)
4199 random_signal = !stopped_by_watchpoint;
4200
4201 /* For the program's own signals, act according to
4202 the signal handling tables. */
4203
4204 if (random_signal)
4205 {
4206 /* Signal not for debugging purposes. */
4207 int printed = 0;
4208 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4209 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4210
4211 if (debug_infrun)
4212 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4213 gdb_signal_to_symbol_string (stop_signal));
4214
4215 stopped_by_random_signal = 1;
4216
4217 if (signal_print[ecs->event_thread->suspend.stop_signal])
4218 {
4219 /* The signal table tells us to print about this signal. */
4220 printed = 1;
4221 target_terminal_ours_for_output ();
4222 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4223 }
4224 /* Always stop on signals if we're either just gaining control
4225 of the program, or the user explicitly requested this thread
4226 to remain stopped. */
4227 if (stop_soon != NO_STOP_QUIETLY
4228 || ecs->event_thread->stop_requested
4229 || (!inf->detaching
4230 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4231 {
4232 stop_stepping (ecs);
4233 return;
4234 }
4235 /* If not going to stop, give terminal back
4236 if we took it away. */
4237 else if (printed)
4238 target_terminal_inferior ();
4239
4240 /* Clear the signal if it should not be passed. */
4241 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4242 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4243
4244 if (ecs->event_thread->prev_pc == stop_pc
4245 && ecs->event_thread->control.trap_expected
4246 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4247 {
4248 /* We were just starting a new sequence, attempting to
4249 single-step off of a breakpoint and expecting a SIGTRAP.
4250 Instead this signal arrives. This signal will take us out
4251 of the stepping range so GDB needs to remember to, when
4252 the signal handler returns, resume stepping off that
4253 breakpoint. */
4254 /* To simplify things, "continue" is forced to use the same
4255 code paths as single-step - set a breakpoint at the
4256 signal return address and then, once hit, step off that
4257 breakpoint. */
4258 if (debug_infrun)
4259 fprintf_unfiltered (gdb_stdlog,
4260 "infrun: signal arrived while stepping over "
4261 "breakpoint\n");
4262
4263 insert_hp_step_resume_breakpoint_at_frame (frame);
4264 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4265 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4266 ecs->event_thread->control.trap_expected = 0;
4267
4268 /* If we were nexting/stepping some other thread, switch to
4269 it, so that we don't continue it, losing control. */
4270 if (!switch_back_to_stepped_thread (ecs))
4271 keep_going (ecs);
4272 return;
4273 }
4274
4275 if (ecs->event_thread->control.step_range_end != 0
4276 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4277 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4278 && frame_id_eq (get_stack_frame_id (frame),
4279 ecs->event_thread->control.step_stack_frame_id)
4280 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4281 {
4282 /* The inferior is about to take a signal that will take it
4283 out of the single step range. Set a breakpoint at the
4284 current PC (which is presumably where the signal handler
4285 will eventually return) and then allow the inferior to
4286 run free.
4287
4288 Note that this is only needed for a signal delivered
4289 while in the single-step range. Nested signals aren't a
4290 problem as they eventually all return. */
4291 if (debug_infrun)
4292 fprintf_unfiltered (gdb_stdlog,
4293 "infrun: signal may take us out of "
4294 "single-step range\n");
4295
4296 insert_hp_step_resume_breakpoint_at_frame (frame);
4297 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4298 ecs->event_thread->control.trap_expected = 0;
4299 keep_going (ecs);
4300 return;
4301 }
4302
4303 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4304 when either there's a nested signal, or when there's a
4305 pending signal enabled just as the signal handler returns
4306 (leaving the inferior at the step-resume-breakpoint without
4307 actually executing it). Either way continue until the
4308 breakpoint is really hit. */
4309
4310 if (!switch_back_to_stepped_thread (ecs))
4311 {
4312 if (debug_infrun)
4313 fprintf_unfiltered (gdb_stdlog,
4314 "infrun: random signal, keep going\n");
4315
4316 keep_going (ecs);
4317 }
4318 return;
4319 }
4320
4321 process_event_stop_test (ecs);
4322 }
4323
4324 /* Come here when we've got some debug event / signal we can explain
4325 (IOW, not a random signal), and test whether it should cause a
4326 stop, or whether we should resume the inferior (transparently).
4327 E.g., could be a breakpoint whose condition evaluates false; we
4328 could be still stepping within the line; etc. */
4329
4330 static void
4331 process_event_stop_test (struct execution_control_state *ecs)
4332 {
4333 struct symtab_and_line stop_pc_sal;
4334 struct frame_info *frame;
4335 struct gdbarch *gdbarch;
4336 CORE_ADDR jmp_buf_pc;
4337 struct bpstat_what what;
4338
4339 /* Handle cases caused by hitting a breakpoint. */
4340
4341 frame = get_current_frame ();
4342 gdbarch = get_frame_arch (frame);
4343
4344 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4345
4346 if (what.call_dummy)
4347 {
4348 stop_stack_dummy = what.call_dummy;
4349 }
4350
4351 /* If we hit an internal event that triggers symbol changes, the
4352 current frame will be invalidated within bpstat_what (e.g., if we
4353 hit an internal solib event). Re-fetch it. */
4354 frame = get_current_frame ();
4355 gdbarch = get_frame_arch (frame);
4356
4357 switch (what.main_action)
4358 {
4359 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4360 /* If we hit the breakpoint at longjmp while stepping, we
4361 install a momentary breakpoint at the target of the
4362 jmp_buf. */
4363
4364 if (debug_infrun)
4365 fprintf_unfiltered (gdb_stdlog,
4366 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4367
4368 ecs->event_thread->stepping_over_breakpoint = 1;
4369
4370 if (what.is_longjmp)
4371 {
4372 struct value *arg_value;
4373
4374 /* If we set the longjmp breakpoint via a SystemTap probe,
4375 then use it to extract the arguments. The destination PC
4376 is the third argument to the probe. */
4377 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4378 if (arg_value)
4379 jmp_buf_pc = value_as_address (arg_value);
4380 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4381 || !gdbarch_get_longjmp_target (gdbarch,
4382 frame, &jmp_buf_pc))
4383 {
4384 if (debug_infrun)
4385 fprintf_unfiltered (gdb_stdlog,
4386 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4387 "(!gdbarch_get_longjmp_target)\n");
4388 keep_going (ecs);
4389 return;
4390 }
4391
4392 /* Insert a breakpoint at resume address. */
4393 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4394 }
4395 else
4396 check_exception_resume (ecs, frame);
4397 keep_going (ecs);
4398 return;
4399
4400 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4401 {
4402 struct frame_info *init_frame;
4403
4404 /* There are several cases to consider.
4405
4406 1. The initiating frame no longer exists. In this case we
4407 must stop, because the exception or longjmp has gone too
4408 far.
4409
4410 2. The initiating frame exists, and is the same as the
4411 current frame. We stop, because the exception or longjmp
4412 has been caught.
4413
4414 3. The initiating frame exists and is different from the
4415 current frame. This means the exception or longjmp has
4416 been caught beneath the initiating frame, so keep going.
4417
4418 4. longjmp breakpoint has been placed just to protect
4419 against stale dummy frames and user is not interested in
4420 stopping around longjmps. */
4421
4422 if (debug_infrun)
4423 fprintf_unfiltered (gdb_stdlog,
4424 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4425
4426 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4427 != NULL);
4428 delete_exception_resume_breakpoint (ecs->event_thread);
4429
4430 if (what.is_longjmp)
4431 {
4432 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread->num);
4433
4434 if (!frame_id_p (ecs->event_thread->initiating_frame))
4435 {
4436 /* Case 4. */
4437 keep_going (ecs);
4438 return;
4439 }
4440 }
4441
4442 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4443
4444 if (init_frame)
4445 {
4446 struct frame_id current_id
4447 = get_frame_id (get_current_frame ());
4448 if (frame_id_eq (current_id,
4449 ecs->event_thread->initiating_frame))
4450 {
4451 /* Case 2. Fall through. */
4452 }
4453 else
4454 {
4455 /* Case 3. */
4456 keep_going (ecs);
4457 return;
4458 }
4459 }
4460
4461 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4462 exists. */
4463 delete_step_resume_breakpoint (ecs->event_thread);
4464
4465 ecs->event_thread->control.stop_step = 1;
4466 end_stepping_range ();
4467 stop_stepping (ecs);
4468 }
4469 return;
4470
4471 case BPSTAT_WHAT_SINGLE:
4472 if (debug_infrun)
4473 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4474 ecs->event_thread->stepping_over_breakpoint = 1;
4475 /* Still need to check other stuff, at least the case where we
4476 are stepping and step out of the right range. */
4477 break;
4478
4479 case BPSTAT_WHAT_STEP_RESUME:
4480 if (debug_infrun)
4481 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4482
4483 delete_step_resume_breakpoint (ecs->event_thread);
4484 if (ecs->event_thread->control.proceed_to_finish
4485 && execution_direction == EXEC_REVERSE)
4486 {
4487 struct thread_info *tp = ecs->event_thread;
4488
4489 /* We are finishing a function in reverse, and just hit the
4490 step-resume breakpoint at the start address of the
4491 function, and we're almost there -- just need to back up
4492 by one more single-step, which should take us back to the
4493 function call. */
4494 tp->control.step_range_start = tp->control.step_range_end = 1;
4495 keep_going (ecs);
4496 return;
4497 }
4498 fill_in_stop_func (gdbarch, ecs);
4499 if (stop_pc == ecs->stop_func_start
4500 && execution_direction == EXEC_REVERSE)
4501 {
4502 /* We are stepping over a function call in reverse, and just
4503 hit the step-resume breakpoint at the start address of
4504 the function. Go back to single-stepping, which should
4505 take us back to the function call. */
4506 ecs->event_thread->stepping_over_breakpoint = 1;
4507 keep_going (ecs);
4508 return;
4509 }
4510 break;
4511
4512 case BPSTAT_WHAT_STOP_NOISY:
4513 if (debug_infrun)
4514 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4515 stop_print_frame = 1;
4516
4517 /* Assume the thread stopped for a breakpoint. We'll still check
4518 whether a/the breakpoint is there when the thread is next
4519 resumed. */
4520 ecs->event_thread->stepping_over_breakpoint = 1;
4521
4522 stop_stepping (ecs);
4523 return;
4524
4525 case BPSTAT_WHAT_STOP_SILENT:
4526 if (debug_infrun)
4527 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4528 stop_print_frame = 0;
4529
4530 /* Assume the thread stopped for a breakpoint. We'll still check
4531 whether a/the breakpoint is there when the thread is next
4532 resumed. */
4533 ecs->event_thread->stepping_over_breakpoint = 1;
4534 stop_stepping (ecs);
4535 return;
4536
4537 case BPSTAT_WHAT_HP_STEP_RESUME:
4538 if (debug_infrun)
4539 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4540
4541 delete_step_resume_breakpoint (ecs->event_thread);
4542 if (ecs->event_thread->step_after_step_resume_breakpoint)
4543 {
4544 /* Back when the step-resume breakpoint was inserted, we
4545 were trying to single-step off a breakpoint. Go back to
4546 doing that. */
4547 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4548 ecs->event_thread->stepping_over_breakpoint = 1;
4549 keep_going (ecs);
4550 return;
4551 }
4552 break;
4553
4554 case BPSTAT_WHAT_KEEP_CHECKING:
4555 break;
4556 }
4557
4558 /* We come here if we hit a breakpoint but should not stop for it.
4559 Possibly we also were stepping and should stop for that. So fall
4560 through and test for stepping. But, if not stepping, do not
4561 stop. */
4562
4563 /* In all-stop mode, if we're currently stepping but have stopped in
4564 some other thread, we need to switch back to the stepped thread. */
4565 if (switch_back_to_stepped_thread (ecs))
4566 return;
4567
4568 if (ecs->event_thread->control.step_resume_breakpoint)
4569 {
4570 if (debug_infrun)
4571 fprintf_unfiltered (gdb_stdlog,
4572 "infrun: step-resume breakpoint is inserted\n");
4573
4574 /* Having a step-resume breakpoint overrides anything
4575 else having to do with stepping commands until
4576 that breakpoint is reached. */
4577 keep_going (ecs);
4578 return;
4579 }
4580
4581 if (ecs->event_thread->control.step_range_end == 0)
4582 {
4583 if (debug_infrun)
4584 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4585 /* Likewise if we aren't even stepping. */
4586 keep_going (ecs);
4587 return;
4588 }
4589
4590 /* Re-fetch current thread's frame in case the code above caused
4591 the frame cache to be re-initialized, making our FRAME variable
4592 a dangling pointer. */
4593 frame = get_current_frame ();
4594 gdbarch = get_frame_arch (frame);
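/* Make sure the stop function's bounds are filled in for the range and
   trampoline checks that follow.  */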
4595 fill_in_stop_func (gdbarch, ecs);
4596
4597 /* If stepping through a line, keep going if still within it.
4598
4599 Note that step_range_end is the address of the first instruction
4600 beyond the step range, and NOT the address of the last instruction
4601 within it!
4602
4603 Note also that during reverse execution, we may be stepping
4604 through a function epilogue and therefore must detect when
4605 the current-frame changes in the middle of a line. */
4606
4607 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4608 && (execution_direction != EXEC_REVERSE
4609 || frame_id_eq (get_frame_id (frame),
4610 ecs->event_thread->control.step_frame_id)))
4611 {
4612 if (debug_infrun)
4613 fprintf_unfiltered
4614 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4615 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4616 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4617
4618 /* Tentatively re-enable range stepping; `resume' disables it if
4619 necessary (e.g., if we're stepping over a breakpoint or we
4620 have software watchpoints). */
4621 ecs->event_thread->control.may_range_step = 1;
4622
4623 /* When stepping backward, stop at beginning of line range
4624 (unless it's the function entry point, in which case
4625 keep going back to the call point). */
4626 if (stop_pc == ecs->event_thread->control.step_range_start
4627 && stop_pc != ecs->stop_func_start
4628 && execution_direction == EXEC_REVERSE)
4629 {
4630 ecs->event_thread->control.stop_step = 1;
4631 end_stepping_range ();
4632 stop_stepping (ecs);
4633 }
4634 else
4635 keep_going (ecs);
4636
4637 return;
4638 }
4639
4640 /* We stepped out of the stepping range. */
4641
4642 /* If we are stepping at the source level and entered the runtime
4643 loader dynamic symbol resolution code...
4644
4645 EXEC_FORWARD: we keep on single stepping until we exit the run
4646 time loader code and reach the callee's address.
4647
4648 EXEC_REVERSE: we've already executed the callee (backward), and
4649 the runtime loader code is handled just like any other
4650 undebuggable function call. Now we need only keep stepping
4651 backward through the trampoline code, and that's handled further
4652 down, so there is nothing for us to do here. */
4653
4654 if (execution_direction != EXEC_REVERSE
4655 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4656 && in_solib_dynsym_resolve_code (stop_pc))
4657 {
4658 CORE_ADDR pc_after_resolver =
4659 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4660
4661 if (debug_infrun)
4662 fprintf_unfiltered (gdb_stdlog,
4663 "infrun: stepped into dynsym resolve code\n");
4664
4665 if (pc_after_resolver)
4666 {
4667 /* Set up a step-resume breakpoint at the address
4668 indicated by SKIP_SOLIB_RESOLVER. */
4669 struct symtab_and_line sr_sal;
4670
4671 init_sal (&sr_sal);
4672 sr_sal.pc = pc_after_resolver;
4673 sr_sal.pspace = get_frame_program_space (frame);
4674
4675 insert_step_resume_breakpoint_at_sal (gdbarch,
4676 sr_sal, null_frame_id);
4677 }
4678
4679 keep_going (ecs);
4680 return;
4681 }
4682
4683 if (ecs->event_thread->control.step_range_end != 1
4684 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4685 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4686 && get_frame_type (frame) == SIGTRAMP_FRAME)
4687 {
4688 if (debug_infrun)
4689 fprintf_unfiltered (gdb_stdlog,
4690 "infrun: stepped into signal trampoline\n");
4691 /* The inferior, while doing a "step" or "next", has ended up in
4692 a signal trampoline (either by a signal being delivered or by
4693 the signal handler returning). Just single-step until the
4694 inferior leaves the trampoline (either by calling the handler
4695 or returning). */
4696 keep_going (ecs);
4697 return;
4698 }
4699
4700 /* If we're in the return path from a shared library trampoline,
4701 we want to proceed through the trampoline when stepping. */
4702 /* macro/2012-04-25: This needs to come before the subroutine
4703 call check below as on some targets return trampolines look
4704 like subroutine calls (MIPS16 return thunks). */
4705 if (gdbarch_in_solib_return_trampoline (gdbarch,
4706 stop_pc, ecs->stop_func_name)
4707 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4708 {
4709 /* Determine where this trampoline returns. */
4710 CORE_ADDR real_stop_pc;
4711
4712 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4713
4714 if (debug_infrun)
4715 fprintf_unfiltered (gdb_stdlog,
4716 "infrun: stepped into solib return tramp\n");
4717
4718 /* Only proceed through if we know where it's going. */
4719 if (real_stop_pc)
4720 {
4721 /* And put the step-breakpoint there and go until there. */
4722 struct symtab_and_line sr_sal;
4723
4724 init_sal (&sr_sal); /* initialize to zeroes */
4725 sr_sal.pc = real_stop_pc;
4726 sr_sal.section = find_pc_overlay (sr_sal.pc);
4727 sr_sal.pspace = get_frame_program_space (frame);
4728
4729 /* Do not specify what the fp should be when we stop since
4730 on some machines the prologue is where the new fp value
4731 is established. */
4732 insert_step_resume_breakpoint_at_sal (gdbarch,
4733 sr_sal, null_frame_id);
4734
4735 /* Restart without fiddling with the step ranges or
4736 other state. */
4737 keep_going (ecs);
4738 return;
4739 }
4740 }
4741
4742 /* Check for subroutine calls. The check for the current frame
4743 equalling the step ID is not necessary - the check of the
4744 previous frame's ID is sufficient - but it is a common case and
4745 cheaper than checking the previous frame's ID.
4746
4747 NOTE: frame_id_eq will never report two invalid frame IDs as
4748 being equal, so to get into this block, both the current and
4749 previous frame must have valid frame IDs. */
4750 /* The outer_frame_id check is a heuristic to detect stepping
4751 through startup code. If we step over an instruction which
4752 sets the stack pointer from an invalid value to a valid value,
4753 we may detect that as a subroutine call from the mythical
4754 "outermost" function. This could be fixed by marking
4755 outermost frames as !stack_p,code_p,special_p. Then the
4756 initial outermost frame, before sp was valid, would
4757 have code_addr == &_start. See the comment in frame_id_eq
4758 for more. */
4759 if (!frame_id_eq (get_stack_frame_id (frame),
4760 ecs->event_thread->control.step_stack_frame_id)
4761 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4762 ecs->event_thread->control.step_stack_frame_id)
4763 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4764 outer_frame_id)
4765 || step_start_function != find_pc_function (stop_pc))))
4766 {
4767 CORE_ADDR real_stop_pc;
4768
4769 if (debug_infrun)
4770 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4771
4772 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4773 || ((ecs->event_thread->control.step_range_end == 1)
4774 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4775 ecs->stop_func_start)))
4776 {
4777 /* I presume that step_over_calls is only 0 when we're
4778 supposed to be stepping at the assembly language level
4779 ("stepi"). Just stop. */
4780 /* Also, maybe we just did a "nexti" inside a prolog, so we
4781 thought it was a subroutine call but it was not. Stop as
4782 well. FENN */
4783 /* And this works the same backward as frontward. MVS */
4784 ecs->event_thread->control.stop_step = 1;
4785 end_stepping_range ();
4786 stop_stepping (ecs);
4787 return;
4788 }
4789
4790 /* Reverse stepping through solib trampolines. */
4791
4792 if (execution_direction == EXEC_REVERSE
4793 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4794 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4795 || (ecs->stop_func_start == 0
4796 && in_solib_dynsym_resolve_code (stop_pc))))
4797 {
4798 /* Any solib trampoline code can be handled in reverse
4799 by simply continuing to single-step. We have already
4800 executed the solib function (backwards), and a few
4801 steps will take us back through the trampoline to the
4802 caller. */
4803 keep_going (ecs);
4804 return;
4805 }
4806
4807 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4808 {
4809 /* We're doing a "next".
4810
4811 Normal (forward) execution: set a breakpoint at the
4812 callee's return address (the address at which the caller
4813 will resume).
4814
4815 Reverse (backward) execution. set the step-resume
4816 breakpoint at the start of the function that we just
4817 stepped into (backwards), and continue to there. When we
4818 get there, we'll need to single-step back to the caller. */
4819
4820 if (execution_direction == EXEC_REVERSE)
4821 {
4822 /* If we're already at the start of the function, we've either
4823 just stepped backward into a single instruction function,
4824 or stepped back out of a signal handler to the first instruction
4825 of the function. Just keep going, which will single-step back
4826 to the caller. */
4827 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
4828 {
4829 struct symtab_and_line sr_sal;
4830
4831 /* Normal function call return (static or dynamic). */
4832 init_sal (&sr_sal);
4833 sr_sal.pc = ecs->stop_func_start;
4834 sr_sal.pspace = get_frame_program_space (frame);
4835 insert_step_resume_breakpoint_at_sal (gdbarch,
4836 sr_sal, null_frame_id);
4837 }
4838 }
4839 else
4840 insert_step_resume_breakpoint_at_caller (frame);
4841
4842 keep_going (ecs);
4843 return;
4844 }
4845
4846 /* If we are in a function call trampoline (a stub between the
4847 calling routine and the real function), locate the real
4848 function. That's what tells us (a) whether we want to step
4849 into it at all, and (b) what prologue we want to run to the
4850 end of, if we do step into it. */
4851 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4852 if (real_stop_pc == 0)
4853 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4854 if (real_stop_pc != 0)
4855 ecs->stop_func_start = real_stop_pc;
4856
4857 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4858 {
4859 struct symtab_and_line sr_sal;
4860
4861 init_sal (&sr_sal);
4862 sr_sal.pc = ecs->stop_func_start;
4863 sr_sal.pspace = get_frame_program_space (frame);
4864
4865 insert_step_resume_breakpoint_at_sal (gdbarch,
4866 sr_sal, null_frame_id);
4867 keep_going (ecs);
4868 return;
4869 }
4870
4871 /* If we have line number information for the function we are
4872 thinking of stepping into and the function isn't on the skip
4873 list, step into it.
4874
4875 If there are several symtabs at that PC (e.g. with include
4876 files), just want to know whether *any* of them have line
4877 numbers. find_pc_line handles this. */
4878 {
4879 struct symtab_and_line tmp_sal;
4880
4881 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4882 if (tmp_sal.line != 0
4883 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4884 &tmp_sal))
4885 {
4886 if (execution_direction == EXEC_REVERSE)
4887 handle_step_into_function_backward (gdbarch, ecs);
4888 else
4889 handle_step_into_function (gdbarch, ecs);
4890 return;
4891 }
4892 }
4893
4894 /* If we have no line number and the step-stop-if-no-debug is
4895 set, we stop the step so that the user has a chance to switch
4896 	 to assembly mode.  */
4897 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4898 && step_stop_if_no_debug)
4899 {
4900 ecs->event_thread->control.stop_step = 1;
4901 end_stepping_range ();
4902 stop_stepping (ecs);
4903 return;
4904 }
4905
4906 if (execution_direction == EXEC_REVERSE)
4907 {
4908 /* If we're already at the start of the function, we've either just
4909 stepped backward into a single instruction function without line
4910 number info, or stepped back out of a signal handler to the first
4911 instruction of the function without line number info. Just keep
4912 going, which will single-step back to the caller. */
4913 if (ecs->stop_func_start != stop_pc)
4914 {
4915 /* Set a breakpoint at callee's start address.
4916 From there we can step once and be back in the caller. */
4917 struct symtab_and_line sr_sal;
4918
4919 init_sal (&sr_sal);
4920 sr_sal.pc = ecs->stop_func_start;
4921 sr_sal.pspace = get_frame_program_space (frame);
4922 insert_step_resume_breakpoint_at_sal (gdbarch,
4923 sr_sal, null_frame_id);
4924 }
4925 }
4926 else
4927 /* Set a breakpoint at callee's return address (the address
4928 at which the caller will resume). */
4929 insert_step_resume_breakpoint_at_caller (frame);
4930
4931 keep_going (ecs);
4932 return;
4933 }
4934
4935 /* Reverse stepping through solib trampolines. */
4936
4937 if (execution_direction == EXEC_REVERSE
4938 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4939 {
4940 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4941 || (ecs->stop_func_start == 0
4942 && in_solib_dynsym_resolve_code (stop_pc)))
4943 {
4944 /* Any solib trampoline code can be handled in reverse
4945 by simply continuing to single-step. We have already
4946 executed the solib function (backwards), and a few
4947 steps will take us back through the trampoline to the
4948 caller. */
4949 keep_going (ecs);
4950 return;
4951 }
4952 else if (in_solib_dynsym_resolve_code (stop_pc))
4953 {
4954 /* Stepped backward into the solib dynsym resolver.
4955 Set a breakpoint at its start and continue, then
4956 one more step will take us out. */
4957 struct symtab_and_line sr_sal;
4958
4959 init_sal (&sr_sal);
4960 sr_sal.pc = ecs->stop_func_start;
4961 sr_sal.pspace = get_frame_program_space (frame);
4962 insert_step_resume_breakpoint_at_sal (gdbarch,
4963 sr_sal, null_frame_id);
4964 keep_going (ecs);
4965 return;
4966 }
4967 }
4968
4969 stop_pc_sal = find_pc_line (stop_pc, 0);
4970
4971 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4972 	 the trampoline processing logic; however, there are some trampolines
4973 that have no names, so we should do trampoline handling first. */
4974 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4975 && ecs->stop_func_name == NULL
4976 && stop_pc_sal.line == 0)
4977 {
4978 if (debug_infrun)
4979 fprintf_unfiltered (gdb_stdlog,
4980 "infrun: stepped into undebuggable function\n");
4981
4982 /* The inferior just stepped into, or returned to, an
4983 undebuggable function (where there is no debugging information
4984 and no line number corresponding to the address where the
4985 inferior stopped). Since we want to skip this kind of code,
4986 we keep going until the inferior returns from this
4987 function - unless the user has asked us not to (via
4988 set step-mode) or we no longer know how to get back
4989 to the call site. */
4990 if (step_stop_if_no_debug
4991 || !frame_id_p (frame_unwind_caller_id (frame)))
4992 {
4993 /* If we have no line number and the step-stop-if-no-debug
4994 is set, we stop the step so that the user has a chance to
4995 	     switch to assembly mode.  */
4996 ecs->event_thread->control.stop_step = 1;
4997 end_stepping_range ();
4998 stop_stepping (ecs);
4999 return;
5000 }
5001 else
5002 {
5003 /* Set a breakpoint at callee's return address (the address
5004 at which the caller will resume). */
5005 insert_step_resume_breakpoint_at_caller (frame);
5006 keep_going (ecs);
5007 return;
5008 }
5009 }
5010
5011 if (ecs->event_thread->control.step_range_end == 1)
5012 {
5013 /* It is stepi or nexti. We always want to stop stepping after
5014 one instruction. */
5015 if (debug_infrun)
5016 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5017 ecs->event_thread->control.stop_step = 1;
5018 end_stepping_range ();
5019 stop_stepping (ecs);
5020 return;
5021 }
5022
5023 if (stop_pc_sal.line == 0)
5024 {
5025 /* We have no line number information. That means to stop
5026 stepping (does this always happen right after one instruction,
5027 when we do "s" in a function with no line numbers,
5028 or can this happen as a result of a return or longjmp?). */
5029 if (debug_infrun)
5030 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5031 ecs->event_thread->control.stop_step = 1;
5032 end_stepping_range ();
5033 stop_stepping (ecs);
5034 return;
5035 }
5036
5037 /* Look for "calls" to inlined functions, part one. If the inline
5038 frame machinery detected some skipped call sites, we have entered
5039 a new inline function. */
5040
5041 if (frame_id_eq (get_frame_id (get_current_frame ()),
5042 ecs->event_thread->control.step_frame_id)
5043 && inline_skipped_frames (ecs->ptid))
5044 {
5045 struct symtab_and_line call_sal;
5046
5047 if (debug_infrun)
5048 fprintf_unfiltered (gdb_stdlog,
5049 "infrun: stepped into inlined function\n");
5050
5051 find_frame_sal (get_current_frame (), &call_sal);
5052
5053 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5054 {
5055 /* For "step", we're going to stop. But if the call site
5056 for this inlined function is on the same source line as
5057 we were previously stepping, go down into the function
5058 first. Otherwise stop at the call site. */
5059
5060 if (call_sal.line == ecs->event_thread->current_line
5061 && call_sal.symtab == ecs->event_thread->current_symtab)
5062 step_into_inline_frame (ecs->ptid);
5063
5064 ecs->event_thread->control.stop_step = 1;
5065 end_stepping_range ();
5066 stop_stepping (ecs);
5067 return;
5068 }
5069 else
5070 {
5071 /* For "next", we should stop at the call site if it is on a
5072 different source line. Otherwise continue through the
5073 inlined function. */
5074 if (call_sal.line == ecs->event_thread->current_line
5075 && call_sal.symtab == ecs->event_thread->current_symtab)
5076 keep_going (ecs);
5077 else
5078 {
5079 ecs->event_thread->control.stop_step = 1;
5080 end_stepping_range ();
5081 stop_stepping (ecs);
5082 }
5083 return;
5084 }
5085 }
5086
5087 /* Look for "calls" to inlined functions, part two. If we are still
5088 in the same real function we were stepping through, but we have
5089 to go further up to find the exact frame ID, we are stepping
5090 through a more inlined call beyond its call site. */
5091
5092 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5093 && !frame_id_eq (get_frame_id (get_current_frame ()),
5094 ecs->event_thread->control.step_frame_id)
5095 && stepped_in_from (get_current_frame (),
5096 ecs->event_thread->control.step_frame_id))
5097 {
5098 if (debug_infrun)
5099 fprintf_unfiltered (gdb_stdlog,
5100 "infrun: stepping through inlined function\n");
5101
5102 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5103 keep_going (ecs);
5104 else
5105 {
5106 ecs->event_thread->control.stop_step = 1;
5107 end_stepping_range ();
5108 stop_stepping (ecs);
5109 }
5110 return;
5111 }
5112
5113 if ((stop_pc == stop_pc_sal.pc)
5114 && (ecs->event_thread->current_line != stop_pc_sal.line
5115 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5116 {
5117 /* We are at the start of a different line. So stop. Note that
5118 we don't stop if we step into the middle of a different line.
5119 That is said to make things like for (;;) statements work
5120 better. */
5121 if (debug_infrun)
5122 fprintf_unfiltered (gdb_stdlog,
5123 "infrun: stepped to a different line\n");
5124 ecs->event_thread->control.stop_step = 1;
5125 end_stepping_range ();
5126 stop_stepping (ecs);
5127 return;
5128 }
5129
5130 /* We aren't done stepping.
5131
5132 Optimize by setting the stepping range to the line.
5133 (We might not be in the original line, but if we entered a
5134 new line in mid-statement, we continue stepping. This makes
5135 things like for(;;) statements work better.) */
5136
5137 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5138 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5139 ecs->event_thread->control.may_range_step = 1;
5140 set_step_info (frame, stop_pc_sal);
5141
5142 if (debug_infrun)
5143 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5144 keep_going (ecs);
5145 }
5146
5147 /* In all-stop mode, if we're currently stepping but have stopped in
5148 some other thread, we may need to switch back to the stepped
5149 	 thread.  Returns true if we set the inferior running, false if we left
5150 it stopped (and the event needs further processing). */
5151
5152 static int
5153 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5154 {
5155 if (!non_stop)
5156 {
5157 struct thread_info *tp;
5158 struct thread_info *stepping_thread;
5159 struct thread_info *step_over;
5160
5161 /* If any thread is blocked on some internal breakpoint, and we
5162 simply need to step over that breakpoint to get it going
5163 again, do that first. */
5164
5165 /* However, if we see an event for the stepping thread, then we
5166 know all other threads have been moved past their breakpoints
5167 already. Let the caller check whether the step is finished,
5168 etc., before deciding to move it past a breakpoint. */
5169 if (ecs->event_thread->control.step_range_end != 0)
5170 return 0;
5171
5172 /* Check if the current thread is blocked on an incomplete
5173 step-over, interrupted by a random signal. */
5174 if (ecs->event_thread->control.trap_expected
5175 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5176 {
5177 if (debug_infrun)
5178 {
5179 fprintf_unfiltered (gdb_stdlog,
5180 "infrun: need to finish step-over of [%s]\n",
5181 target_pid_to_str (ecs->event_thread->ptid));
5182 }
5183 keep_going (ecs);
5184 return 1;
5185 }
5186
5187 /* Check if the current thread is blocked by a single-step
5188 breakpoint of another thread. */
5189 if (ecs->hit_singlestep_breakpoint)
5190 {
5191 if (debug_infrun)
5192 {
5193 fprintf_unfiltered (gdb_stdlog,
5194 "infrun: need to step [%s] over single-step "
5195 "breakpoint\n",
5196 target_pid_to_str (ecs->ptid));
5197 }
5198 keep_going (ecs);
5199 return 1;
5200 }
5201
5202 /* Otherwise, we no longer expect a trap in the current thread.
5203 Clear the trap_expected flag before switching back -- this is
5204 what keep_going does as well, if we call it. */
5205 ecs->event_thread->control.trap_expected = 0;
5206
5207 /* If scheduler locking applies even if not stepping, there's no
5208 need to walk over threads. Above we've checked whether the
5209 	 current thread is stepping.  If some thread other than the
5210 	 event thread is stepping, then it must be that scheduler
5211 locking is not in effect. */
5212 if (schedlock_applies (0))
5213 return 0;
5214
5215 	      /* Look for the stepping/nexting thread, and check if any
5216 	 thread other than the stepping thread needs to start a
5217 step-over. Do all step-overs before actually proceeding with
5218 step/next/etc. */
5219 stepping_thread = NULL;
5220 step_over = NULL;
5221 ALL_THREADS (tp)
5222 {
5223 /* Ignore threads of processes we're not resuming. */
5224 if (!sched_multi
5225 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5226 continue;
5227
5228 /* When stepping over a breakpoint, we lock all threads
5229 except the one that needs to move past the breakpoint.
5230 If a non-event thread has this set, the "incomplete
5231 step-over" check above should have caught it earlier. */
5232 gdb_assert (!tp->control.trap_expected);
5233
5234 /* Did we find the stepping thread? */
5235 if (tp->control.step_range_end)
5236 {
5237 	     /* Yep.  There should only be one, though.  */
5238 gdb_assert (stepping_thread == NULL);
5239
5240 /* The event thread is handled at the top, before we
5241 enter this loop. */
5242 gdb_assert (tp != ecs->event_thread);
5243
5244 /* If some thread other than the event thread is
5245 stepping, then scheduler locking can't be in effect,
5246 otherwise we wouldn't have resumed the current event
5247 thread in the first place. */
5248 gdb_assert (!schedlock_applies (1));
5249
5250 stepping_thread = tp;
5251 }
5252 else if (thread_still_needs_step_over (tp))
5253 {
5254 step_over = tp;
5255
5256 /* At the top we've returned early if the event thread
5257 	 is stepping.  If some thread other than the event
5258 thread is stepping, then scheduler locking can't be
5259 in effect, and we can resume this thread. No need to
5260 keep looking for the stepping thread then. */
5261 break;
5262 }
5263 }
5264
5265 if (step_over != NULL)
5266 {
5267 tp = step_over;
5268 if (debug_infrun)
5269 {
5270 fprintf_unfiltered (gdb_stdlog,
5271 "infrun: need to step-over [%s]\n",
5272 target_pid_to_str (tp->ptid));
5273 }
5274
5275 /* Only the stepping thread should have this set. */
5276 gdb_assert (tp->control.step_range_end == 0);
5277
5278 ecs->ptid = tp->ptid;
5279 ecs->event_thread = tp;
5280 switch_to_thread (ecs->ptid);
5281 keep_going (ecs);
5282 return 1;
5283 }
5284
5285 if (stepping_thread != NULL)
5286 {
5287 struct frame_info *frame;
5288 struct gdbarch *gdbarch;
5289
5290 tp = stepping_thread;
5291
5292 /* If the stepping thread exited, then don't try to switch
5293 back and resume it, which could fail in several different
5294 ways depending on the target. Instead, just keep going.
5295
5296 We can find a stepping dead thread in the thread list in
5297 two cases:
5298
5299 - The target supports thread exit events, and when the
5300 target tries to delete the thread from the thread list,
5301 inferior_ptid pointed at the exiting thread. In such
5302 case, calling delete_thread does not really remove the
5303 thread from the list; instead, the thread is left listed,
5304 with 'exited' state.
5305
5306 - The target's debug interface does not support thread
5307 exit events, and so we have no idea whatsoever if the
5308 previously stepping thread is still alive. For that
5309 reason, we need to synchronously query the target
5310 now. */
5311 if (is_exited (tp->ptid)
5312 || !target_thread_alive (tp->ptid))
5313 {
5314 if (debug_infrun)
5315 fprintf_unfiltered (gdb_stdlog,
5316 "infrun: not switching back to "
5317 "stepped thread, it has vanished\n");
5318
5319 delete_thread (tp->ptid);
5320 keep_going (ecs);
5321 return 1;
5322 }
5323
5324 if (debug_infrun)
5325 fprintf_unfiltered (gdb_stdlog,
5326 "infrun: switching back to stepped thread\n");
5327
5328 ecs->event_thread = tp;
5329 ecs->ptid = tp->ptid;
5330 context_switch (ecs->ptid);
5331
5332 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5333 frame = get_current_frame ();
5334 gdbarch = get_frame_arch (frame);
5335
5336 /* If the PC of the thread we were trying to single-step has
5337 changed, then that thread has trapped or been signaled,
5338 but the event has not been reported to GDB yet. Re-poll
5339 the target looking for this particular thread's event
5340 (i.e. temporarily enable schedlock) by:
5341
5342 - setting a break at the current PC
5343 - resuming that particular thread, only (by setting
5344 trap expected)
5345
5346 	 This prevents us from continuously moving the single-step
5347 breakpoint forward, one instruction at a time,
5348 overstepping. */
5349
5350 if (gdbarch_software_single_step_p (gdbarch)
5351 && stop_pc != tp->prev_pc)
5352 {
5353 if (debug_infrun)
5354 fprintf_unfiltered (gdb_stdlog,
5355 "infrun: expected thread advanced also\n");
5356
5357 insert_single_step_breakpoint (get_frame_arch (frame),
5358 get_frame_address_space (frame),
5359 stop_pc);
5360 singlestep_breakpoints_inserted_p = 1;
5361 ecs->event_thread->control.trap_expected = 1;
5362 singlestep_ptid = inferior_ptid;
5363 singlestep_pc = stop_pc;
5364
5365 resume (0, GDB_SIGNAL_0);
5366 prepare_to_wait (ecs);
5367 }
5368 else
5369 {
5370 if (debug_infrun)
5371 fprintf_unfiltered (gdb_stdlog,
5372 "infrun: expected thread still "
5373 "hasn't advanced\n");
5374 keep_going (ecs);
5375 }
5376
5377 return 1;
5378 }
5379 }
5380 return 0;
5381 }
5382
5383 /* Is thread TP in the middle of single-stepping? */
5384
5385 static int
5386 currently_stepping (struct thread_info *tp)
5387 {
5388 return ((tp->control.step_range_end
5389 && tp->control.step_resume_breakpoint == NULL)
5390 || tp->control.trap_expected
5391 || bpstat_should_step ());
5392 }
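/* Descriptive note on the conditions above (added for clarity): a
   thread counts as stepping if it has an active step range with no
   step-resume breakpoint pending, if it is being single-stepped past
   a breakpoint (trap_expected), or if software watchpoints require
   single-stepping (bpstat_should_step).  */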
5393
5394 /* Inferior has stepped into a subroutine call with source code that
5395 	 we should not step over.  Step to the first line of code in
5396 it. */
5397
5398 static void
5399 handle_step_into_function (struct gdbarch *gdbarch,
5400 struct execution_control_state *ecs)
5401 {
5402 struct symtab *s;
5403 struct symtab_and_line stop_func_sal, sr_sal;
5404
5405 fill_in_stop_func (gdbarch, ecs);
5406
5407 s = find_pc_symtab (stop_pc);
5408 if (s && s->language != language_asm)
5409 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5410 ecs->stop_func_start);
5411
5412 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5413 /* Use the step_resume_break to step until the end of the prologue,
5414 even if that involves jumps (as it seems to on the vax under
5415 4.2). */
5416 /* If the prologue ends in the middle of a source line, continue to
5417 the end of that source line (if it is still within the function).
5418 Otherwise, just go to end of prologue. */
5419 if (stop_func_sal.end
5420 && stop_func_sal.pc != ecs->stop_func_start
5421 && stop_func_sal.end < ecs->stop_func_end)
5422 ecs->stop_func_start = stop_func_sal.end;
5423
5424 /* Architectures which require breakpoint adjustment might not be able
5425 to place a breakpoint at the computed address. If so, the test
5426 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5427 ecs->stop_func_start to an address at which a breakpoint may be
5428 legitimately placed.
5429
5430 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5431 made, GDB will enter an infinite loop when stepping through
5432 optimized code consisting of VLIW instructions which contain
5433 subinstructions corresponding to different source lines. On
5434 FR-V, it's not permitted to place a breakpoint on any but the
5435 first subinstruction of a VLIW instruction. When a breakpoint is
5436 set, GDB will adjust the breakpoint address to the beginning of
5437 the VLIW instruction. Thus, we need to make the corresponding
5438 adjustment here when computing the stop address. */
5439
5440 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5441 {
5442 ecs->stop_func_start
5443 = gdbarch_adjust_breakpoint_address (gdbarch,
5444 ecs->stop_func_start);
5445 }
5446
5447 if (ecs->stop_func_start == stop_pc)
5448 {
5449 /* We are already there: stop now. */
5450 ecs->event_thread->control.stop_step = 1;
5451 end_stepping_range ();
5452 stop_stepping (ecs);
5453 return;
5454 }
5455 else
5456 {
5457 /* Put the step-breakpoint there and go until there. */
5458 init_sal (&sr_sal); /* initialize to zeroes */
5459 sr_sal.pc = ecs->stop_func_start;
5460 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5461 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5462
5463 /* Do not specify what the fp should be when we stop since on
5464 some machines the prologue is where the new fp value is
5465 established. */
5466 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5467
5468 /* And make sure stepping stops right away then. */
5469 ecs->event_thread->control.step_range_end
5470 = ecs->event_thread->control.step_range_start;
5471 }
5472 keep_going (ecs);
5473 }
5474
5475 /* Inferior has stepped backward into a subroutine call with source
5476 	 code that we should not step over.  Step to the beginning of the
5477 last line of code in it. */
5478
5479 static void
5480 handle_step_into_function_backward (struct gdbarch *gdbarch,
5481 struct execution_control_state *ecs)
5482 {
5483 struct symtab *s;
5484 struct symtab_and_line stop_func_sal;
5485
5486 fill_in_stop_func (gdbarch, ecs);
5487
5488 s = find_pc_symtab (stop_pc);
5489 if (s && s->language != language_asm)
5490 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5491 ecs->stop_func_start);
5492
5493 stop_func_sal = find_pc_line (stop_pc, 0);
5494
5495 /* OK, we're just going to keep stepping here. */
5496 if (stop_func_sal.pc == stop_pc)
5497 {
5498 /* We're there already. Just stop stepping now. */
5499 ecs->event_thread->control.stop_step = 1;
5500 end_stepping_range ();
5501 stop_stepping (ecs);
5502 }
5503 else
5504 {
5505 /* Else just reset the step range and keep going.
5506 No step-resume breakpoint, they don't work for
5507 epilogues, which can have multiple entry paths. */
5508 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5509 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5510 keep_going (ecs);
5511 }
5512 return;
5513 }
5514
5515 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5516 	 This is used both to skip over functions and to skip over code.  */
5517
5518 static void
5519 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5520 struct symtab_and_line sr_sal,
5521 struct frame_id sr_id,
5522 enum bptype sr_type)
5523 {
5524 /* There should never be more than one step-resume or longjmp-resume
5525 breakpoint per thread, so we should never be setting a new
5526 step_resume_breakpoint when one is already active. */
5527 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5528 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5529
5530 if (debug_infrun)
5531 fprintf_unfiltered (gdb_stdlog,
5532 "infrun: inserting step-resume breakpoint at %s\n",
5533 paddress (gdbarch, sr_sal.pc));
5534
5535 inferior_thread ()->control.step_resume_breakpoint
5536 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5537 }
5538
5539 void
5540 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5541 struct symtab_and_line sr_sal,
5542 struct frame_id sr_id)
5543 {
5544 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5545 sr_sal, sr_id,
5546 bp_step_resume);
5547 }
5548
5549 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5550 This is used to skip a potential signal handler.
5551
5552 This is called with the interrupted function's frame. The signal
5553 handler, when it returns, will resume the interrupted function at
5554 RETURN_FRAME.pc. */
5555
5556 static void
5557 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5558 {
5559 struct symtab_and_line sr_sal;
5560 struct gdbarch *gdbarch;
5561
5562 gdb_assert (return_frame != NULL);
5563 init_sal (&sr_sal); /* initialize to zeros */
5564
5565 gdbarch = get_frame_arch (return_frame);
5566 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5567 sr_sal.section = find_pc_overlay (sr_sal.pc);
5568 sr_sal.pspace = get_frame_program_space (return_frame);
5569
5570 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5571 get_stack_frame_id (return_frame),
5572 bp_hp_step_resume);
5573 }
5574
5575 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5576 is used to skip a function after stepping into it (for "next" or if
5577 the called function has no debugging information).
5578
5579 The current function has almost always been reached by single
5580 stepping a call or return instruction. NEXT_FRAME belongs to the
5581 current function, and the breakpoint will be set at the caller's
5582 resume address.
5583
5584 This is a separate function rather than reusing
5585 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5586 get_prev_frame, which may stop prematurely (see the implementation
5587 of frame_unwind_caller_id for an example). */
5588
5589 static void
5590 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5591 {
5592 struct symtab_and_line sr_sal;
5593 struct gdbarch *gdbarch;
5594
5595 /* We shouldn't have gotten here if we don't know where the call site
5596 is. */
5597 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5598
5599 init_sal (&sr_sal); /* initialize to zeros */
5600
5601 gdbarch = frame_unwind_caller_arch (next_frame);
5602 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5603 frame_unwind_caller_pc (next_frame));
5604 sr_sal.section = find_pc_overlay (sr_sal.pc);
5605 sr_sal.pspace = frame_unwind_program_space (next_frame);
5606
5607 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5608 frame_unwind_caller_id (next_frame));
5609 }
5610
5611 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5612 new breakpoint at the target of a jmp_buf. The handling of
5613 longjmp-resume uses the same mechanisms used for handling
5614 "step-resume" breakpoints. */
5615
5616 static void
5617 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5618 {
5619 /* There should never be more than one longjmp-resume breakpoint per
5620 thread, so we should never be setting a new
5621 longjmp_resume_breakpoint when one is already active. */
5622 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5623
5624 if (debug_infrun)
5625 fprintf_unfiltered (gdb_stdlog,
5626 "infrun: inserting longjmp-resume breakpoint at %s\n",
5627 paddress (gdbarch, pc));
5628
5629 inferior_thread ()->control.exception_resume_breakpoint =
5630 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5631 }
5632
5633 /* Insert an exception resume breakpoint. TP is the thread throwing
5634 the exception. The block B is the block of the unwinder debug hook
5635 function. FRAME is the frame corresponding to the call to this
5636 function. SYM is the symbol of the function argument holding the
5637 target PC of the exception. */
5638
5639 static void
5640 insert_exception_resume_breakpoint (struct thread_info *tp,
5641 struct block *b,
5642 struct frame_info *frame,
5643 struct symbol *sym)
5644 {
5645 volatile struct gdb_exception e;
5646
5647 /* We want to ignore errors here. */
5648 TRY_CATCH (e, RETURN_MASK_ERROR)
5649 {
5650 struct symbol *vsym;
5651 struct value *value;
5652 CORE_ADDR handler;
5653 struct breakpoint *bp;
5654
5655 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5656 value = read_var_value (vsym, frame);
5657 /* If the value was optimized out, revert to the old behavior. */
5658 if (! value_optimized_out (value))
5659 {
5660 handler = value_as_address (value);
5661
5662 if (debug_infrun)
5663 fprintf_unfiltered (gdb_stdlog,
5664 "infrun: exception resume at %lx\n",
5665 (unsigned long) handler);
5666
5667 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5668 handler, bp_exception_resume);
5669
5670 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5671 frame = NULL;
5672
5673 bp->thread = tp->num;
5674 inferior_thread ()->control.exception_resume_breakpoint = bp;
5675 }
5676 }
5677 }
5678
5679 /* A helper for check_exception_resume that sets an
5680 exception-breakpoint based on a SystemTap probe. */
5681
5682 static void
5683 insert_exception_resume_from_probe (struct thread_info *tp,
5684 const struct bound_probe *probe,
5685 struct frame_info *frame)
5686 {
5687 struct value *arg_value;
5688 CORE_ADDR handler;
5689 struct breakpoint *bp;
5690
5691 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5692 if (!arg_value)
5693 return;
5694
5695 handler = value_as_address (arg_value);
5696
5697 if (debug_infrun)
5698 fprintf_unfiltered (gdb_stdlog,
5699 "infrun: exception resume at %s\n",
5700 paddress (get_objfile_arch (probe->objfile),
5701 handler));
5702
5703 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5704 handler, bp_exception_resume);
5705 bp->thread = tp->num;
5706 inferior_thread ()->control.exception_resume_breakpoint = bp;
5707 }
5708
5709 /* This is called when an exception has been intercepted. Check to
5710 see whether the exception's destination is of interest, and if so,
5711 set an exception resume breakpoint there. */
5712
5713 static void
5714 check_exception_resume (struct execution_control_state *ecs,
5715 struct frame_info *frame)
5716 {
5717 volatile struct gdb_exception e;
5718 struct bound_probe probe;
5719 struct symbol *func;
5720
5721 /* First see if this exception unwinding breakpoint was set via a
5722 SystemTap probe point. If so, the probe has two arguments: the
5723 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5724 set a breakpoint there. */
5725 probe = find_probe_by_pc (get_frame_pc (frame));
5726 if (probe.probe)
5727 {
5728 insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
5729 return;
5730 }
5731
5732 func = get_frame_function (frame);
5733 if (!func)
5734 return;
5735
5736 TRY_CATCH (e, RETURN_MASK_ERROR)
5737 {
5738 struct block *b;
5739 struct block_iterator iter;
5740 struct symbol *sym;
5741 int argno = 0;
5742
5743 /* The exception breakpoint is a thread-specific breakpoint on
5744 the unwinder's debug hook, declared as:
5745
5746 void _Unwind_DebugHook (void *cfa, void *handler);
5747
5748 The CFA argument indicates the frame to which control is
5749 about to be transferred. HANDLER is the destination PC.
5750
5751 We ignore the CFA and set a temporary breakpoint at HANDLER.
5752 This is not extremely efficient but it avoids issues in gdb
5753 with computing the DWARF CFA, and it also works even in weird
5754 cases such as throwing an exception from inside a signal
5755 handler. */
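/* Note (descriptive only): the loop below does not inspect argument
   names or types; it simply skips the first formal argument (the CFA)
   and hands the second one (HANDLER) to
   insert_exception_resume_breakpoint.  */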
5756
5757 b = SYMBOL_BLOCK_VALUE (func);
5758 ALL_BLOCK_SYMBOLS (b, iter, sym)
5759 {
5760 if (!SYMBOL_IS_ARGUMENT (sym))
5761 continue;
5762
5763 if (argno == 0)
5764 ++argno;
5765 else
5766 {
5767 insert_exception_resume_breakpoint (ecs->event_thread,
5768 b, frame, sym);
5769 break;
5770 }
5771 }
5772 }
5773 }
5774
5775 static void
5776 stop_stepping (struct execution_control_state *ecs)
5777 {
5778 if (debug_infrun)
5779 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5780
5781 clear_step_over_info ();
5782
5783 /* Let callers know we don't want to wait for the inferior anymore. */
5784 ecs->wait_some_more = 0;
5785 }
5786
5787 /* Called when we should continue running the inferior, because the
5788 	 current event doesn't cause a user-visible stop.  This does the
5789 resuming part; waiting for the next event is done elsewhere. */
5790
5791 static void
5792 keep_going (struct execution_control_state *ecs)
5793 {
5794 /* Make sure normal_stop is called if we get a QUIT handled before
5795 reaching resume. */
5796 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5797
5798 /* Save the pc before execution, to compare with pc after stop. */
5799 ecs->event_thread->prev_pc
5800 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5801
5802 if (ecs->event_thread->control.trap_expected
5803 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5804 {
5805 /* We haven't yet gotten our trap, and either: intercepted a
5806 non-signal event (e.g., a fork); or took a signal which we
5807 are supposed to pass through to the inferior. Simply
5808 continue. */
5809 discard_cleanups (old_cleanups);
5810 resume (currently_stepping (ecs->event_thread),
5811 ecs->event_thread->suspend.stop_signal);
5812 }
5813 else
5814 {
5815 volatile struct gdb_exception e;
5816 struct regcache *regcache = get_current_regcache ();
5817
5818 /* Either the trap was not expected, but we are continuing
5819 anyway (if we got a signal, the user asked it be passed to
5820 the child)
5821 -- or --
5822 We got our expected trap, but decided we should resume from
5823 it.
5824
5825 We're going to run this baby now!
5826
5827 Note that insert_breakpoints won't try to re-insert
5828 already inserted breakpoints. Therefore, we don't
5829 care if breakpoints were already inserted, or not. */
5830
5831 /* If we need to step over a breakpoint, and we're not using
5832 displaced stepping to do so, insert all breakpoints
5833 (watchpoints, etc.) but the one we're stepping over, step one
5834 instruction, and then re-insert the breakpoint when that step
5835 is finished. */
5836 if ((ecs->hit_singlestep_breakpoint
5837 || thread_still_needs_step_over (ecs->event_thread))
5838 && !use_displaced_stepping (get_regcache_arch (regcache)))
5839 {
5840 set_step_over_info (get_regcache_aspace (regcache),
5841 regcache_read_pc (regcache));
5842 }
5843 else
5844 clear_step_over_info ();
5845
5846 /* Stop stepping if inserting breakpoints fails. */
5847 TRY_CATCH (e, RETURN_MASK_ERROR)
5848 {
5849 insert_breakpoints ();
5850 }
5851 if (e.reason < 0)
5852 {
5853 exception_print (gdb_stderr, e);
5854 stop_stepping (ecs);
5855 return;
5856 }
5857
5858 ecs->event_thread->control.trap_expected
5859 = (ecs->event_thread->stepping_over_breakpoint
5860 || ecs->hit_singlestep_breakpoint);
5861
5862 /* Do not deliver GDB_SIGNAL_TRAP (except when the user
5863 explicitly specifies that such a signal should be delivered
5864 to the target program). Typically, that would occur when a
5865 user is debugging a target monitor on a simulator: the target
5866 monitor sets a breakpoint; the simulator encounters this
5867 breakpoint and halts the simulation handing control to GDB;
5868 GDB, noting that the stop address doesn't map to any known
5869 breakpoint, returns control back to the simulator; the
5870 simulator then delivers the hardware equivalent of a
5871 GDB_SIGNAL_TRAP to the program being debugged. */
5872 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5873 && !signal_program[ecs->event_thread->suspend.stop_signal])
5874 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5875
5876 discard_cleanups (old_cleanups);
5877 resume (currently_stepping (ecs->event_thread),
5878 ecs->event_thread->suspend.stop_signal);
5879 }
5880
5881 prepare_to_wait (ecs);
5882 }
5883
5884 /* This function normally comes after a resume, before
5885 handle_inferior_event exits. It takes care of any last bits of
5886 housekeeping, and sets the all-important wait_some_more flag. */
5887
5888 static void
5889 prepare_to_wait (struct execution_control_state *ecs)
5890 {
5891 if (debug_infrun)
5892 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5893
5894 /* This is the old end of the while loop. Let everybody know we
5895 want to wait for the inferior some more and get called again
5896 soon. */
5897 ecs->wait_some_more = 1;
5898 }
5899
5900 /* We are done with the step range of a step/next/si/ni command.
5901 Called once for each n of a "step n" operation. Notify observers
5902 if not in the middle of doing a "step N" operation for N > 1. */
5903
5904 static void
5905 end_stepping_range (void)
5906 {
5907 if (inferior_thread ()->step_multi
5908 && inferior_thread ()->control.stop_step)
5909 return;
5910
5911 observer_notify_end_stepping_range ();
5912 }
5913
5914 /* Several print_*_reason functions to print why the inferior has stopped.
5915 We always print something when the inferior exits, or receives a signal.
5916 The rest of the cases are dealt with later on in normal_stop and
5917 print_it_typical. Ideally there should be a call to one of these
5918 	 print_*_reason functions from handle_inferior_event each time
5919 stop_stepping is called.
5920
5921 Note that we don't call these directly, instead we delegate that to
5922 the interpreters, through observers. Interpreters then call these
5923 with whatever uiout is right. */
5924
5925 void
5926 print_end_stepping_range_reason (struct ui_out *uiout)
5927 {
5928 /* For CLI-like interpreters, print nothing. */
5929
5930 if (ui_out_is_mi_like_p (uiout))
5931 {
5932 ui_out_field_string (uiout, "reason",
5933 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5934 }
5935 }
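/* Illustrative only: under the MI interpreter this contributes
   reason="end-stepping-range" to the *stopped notification, while
   CLI-like interpreters print nothing here.  */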
5936
5937 void
5938 print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
5939 {
5940 annotate_signalled ();
5941 if (ui_out_is_mi_like_p (uiout))
5942 ui_out_field_string
5943 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5944 ui_out_text (uiout, "\nProgram terminated with signal ");
5945 annotate_signal_name ();
5946 ui_out_field_string (uiout, "signal-name",
5947 gdb_signal_to_name (siggnal));
5948 annotate_signal_name_end ();
5949 ui_out_text (uiout, ", ");
5950 annotate_signal_string ();
5951 ui_out_field_string (uiout, "signal-meaning",
5952 gdb_signal_to_string (siggnal));
5953 annotate_signal_string_end ();
5954 ui_out_text (uiout, ".\n");
5955 ui_out_text (uiout, "The program no longer exists.\n");
5956 }
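/* Illustrative CLI output (the signal name and meaning depend on the
   signal actually received):

     Program terminated with signal SIGSEGV, Segmentation fault.
     The program no longer exists.  */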
5957
5958 void
5959 print_exited_reason (struct ui_out *uiout, int exitstatus)
5960 {
5961 struct inferior *inf = current_inferior ();
5962 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5963
5964 annotate_exited (exitstatus);
5965 if (exitstatus)
5966 {
5967 if (ui_out_is_mi_like_p (uiout))
5968 ui_out_field_string (uiout, "reason",
5969 async_reason_lookup (EXEC_ASYNC_EXITED));
5970 ui_out_text (uiout, "[Inferior ");
5971 ui_out_text (uiout, plongest (inf->num));
5972 ui_out_text (uiout, " (");
5973 ui_out_text (uiout, pidstr);
5974 ui_out_text (uiout, ") exited with code ");
5975 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5976 ui_out_text (uiout, "]\n");
5977 }
5978 else
5979 {
5980 if (ui_out_is_mi_like_p (uiout))
5981 ui_out_field_string
5982 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5983 ui_out_text (uiout, "[Inferior ");
5984 ui_out_text (uiout, plongest (inf->num));
5985 ui_out_text (uiout, " (");
5986 ui_out_text (uiout, pidstr);
5987 ui_out_text (uiout, ") exited normally]\n");
5988 }
5989 }
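/* Illustrative CLI output (inferior number and pid string are made-up
   examples; the pid string is target-dependent, and the exit code is
   printed in octal):

     [Inferior 1 (process 1234) exited with code 01]
     [Inferior 1 (process 1234) exited normally]  */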
5990
5991 void
5992 print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
5993 {
5994 annotate_signal ();
5995
5996 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5997 {
5998 struct thread_info *t = inferior_thread ();
5999
6000 ui_out_text (uiout, "\n[");
6001 ui_out_field_string (uiout, "thread-name",
6002 target_pid_to_str (t->ptid));
6003 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
6004 ui_out_text (uiout, " stopped");
6005 }
6006 else
6007 {
6008 ui_out_text (uiout, "\nProgram received signal ");
6009 annotate_signal_name ();
6010 if (ui_out_is_mi_like_p (uiout))
6011 ui_out_field_string
6012 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
6013 ui_out_field_string (uiout, "signal-name",
6014 gdb_signal_to_name (siggnal));
6015 annotate_signal_name_end ();
6016 ui_out_text (uiout, ", ");
6017 annotate_signal_string ();
6018 ui_out_field_string (uiout, "signal-meaning",
6019 gdb_signal_to_string (siggnal));
6020 annotate_signal_string_end ();
6021 }
6022 ui_out_text (uiout, ".\n");
6023 }
6024
6025 void
6026 print_no_history_reason (struct ui_out *uiout)
6027 {
6028 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
6029 }
6030
6031 /* Print current location without a level number, if we have changed
6032 functions or hit a breakpoint. Print source line if we have one.
6033 bpstat_print contains the logic deciding in detail what to print,
6034 based on the event(s) that just occurred. */
6035
6036 void
6037 print_stop_event (struct target_waitstatus *ws)
6038 {
6039 int bpstat_ret;
6040 int source_flag;
6041 int do_frame_printing = 1;
6042 struct thread_info *tp = inferior_thread ();
6043
6044 bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
6045 switch (bpstat_ret)
6046 {
6047 case PRINT_UNKNOWN:
6048 /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
6049 should) carry around the function and does (or should) use
6050 that when doing a frame comparison. */
6051 if (tp->control.stop_step
6052 && frame_id_eq (tp->control.step_frame_id,
6053 get_frame_id (get_current_frame ()))
6054 && step_start_function == find_pc_function (stop_pc))
6055 {
6056 /* Finished step, just print source line. */
6057 source_flag = SRC_LINE;
6058 }
6059 else
6060 {
6061 /* Print location and source line. */
6062 source_flag = SRC_AND_LOC;
6063 }
6064 break;
6065 case PRINT_SRC_AND_LOC:
6066 /* Print location and source line. */
6067 source_flag = SRC_AND_LOC;
6068 break;
6069 case PRINT_SRC_ONLY:
6070 source_flag = SRC_LINE;
6071 break;
6072 case PRINT_NOTHING:
6073 /* Something bogus. */
6074 source_flag = SRC_LINE;
6075 do_frame_printing = 0;
6076 break;
6077 default:
6078 internal_error (__FILE__, __LINE__, _("Unknown value."));
6079 }
6080
6081 /* The behavior of this routine with respect to the source
6082 flag is:
6083 SRC_LINE: Print only source line
6084 LOCATION: Print only location
6085 SRC_AND_LOC: Print location and source line. */
6086 if (do_frame_printing)
6087 print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
6088
6089 /* Display the auto-display expressions. */
6090 do_displays ();
6091 }
6092
6093 /* Here to return control to GDB when the inferior stops for real.
6094 Print appropriate messages, remove breakpoints, give terminal our modes.
6095
6096 STOP_PRINT_FRAME nonzero means print the executing frame
6097 (pc, function, args, file, line number and line text).
6098 BREAKPOINTS_FAILED nonzero means stop was due to error
6099 attempting to insert breakpoints. */
6100
6101 void
6102 normal_stop (void)
6103 {
6104 struct target_waitstatus last;
6105 ptid_t last_ptid;
6106 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
6107
6108 get_last_target_status (&last_ptid, &last);
6109
6110 /* If an exception is thrown from this point on, make sure to
6111 propagate GDB's knowledge of the executing state to the
6112 frontend/user running state. A QUIT is an easy exception to see
6113 here, so do this before any filtered output. */
6114 if (!non_stop)
6115 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
6116 else if (last.kind != TARGET_WAITKIND_SIGNALLED
6117 && last.kind != TARGET_WAITKIND_EXITED
6118 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6119 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
6120
6121 /* As with the notification of thread events, we want to delay
6122 notifying the user that we've switched thread context until
6123 the inferior actually stops.
6124
6125 There's no point in saying anything if the inferior has exited.
6126 Note that SIGNALLED here means "exited with a signal", not
6127 "received a signal".
6128
6129 Also skip saying anything in non-stop mode. In that mode, as we
6130 don't want GDB to switch threads behind the user's back, to avoid
6131 races where the user is typing a command to apply to thread x,
6132 but GDB switches to thread y before the user finishes entering
6133 the command, fetch_inferior_event installs a cleanup to restore
6134 the current thread back to the thread the user had selected right
6135 after this event is handled, so we're not really switching, only
6136 informing of a stop. */
6137 if (!non_stop
6138 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
6139 && target_has_execution
6140 && last.kind != TARGET_WAITKIND_SIGNALLED
6141 && last.kind != TARGET_WAITKIND_EXITED
6142 && last.kind != TARGET_WAITKIND_NO_RESUMED)
6143 {
6144 target_terminal_ours_for_output ();
6145 printf_filtered (_("[Switching to %s]\n"),
6146 target_pid_to_str (inferior_ptid));
6147 annotate_thread_changed ();
6148 previous_inferior_ptid = inferior_ptid;
6149 }
6150
6151 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
6152 {
6153 gdb_assert (sync_execution || !target_can_async_p ());
6154
6155 target_terminal_ours_for_output ();
6156 printf_filtered (_("No unwaited-for children left.\n"));
6157 }
6158
6159 if (!breakpoints_always_inserted_mode () && target_has_execution)
6160 {
6161 if (remove_breakpoints ())
6162 {
6163 target_terminal_ours_for_output ();
6164 printf_filtered (_("Cannot remove breakpoints because "
6165 "program is no longer writable.\nFurther "
6166 "execution is probably impossible.\n"));
6167 }
6168 }
6169
6170 /* If an auto-display called a function and that got a signal,
6171 delete that auto-display to avoid an infinite recursion. */
6172
6173 if (stopped_by_random_signal)
6174 disable_current_display ();
6175
6176 /* Don't print a message if in the middle of doing a "step n"
6177 operation for n > 1 */
6178 if (target_has_execution
6179 && last.kind != TARGET_WAITKIND_SIGNALLED
6180 && last.kind != TARGET_WAITKIND_EXITED
6181 && inferior_thread ()->step_multi
6182 && inferior_thread ()->control.stop_step)
6183 goto done;
6184
6185 target_terminal_ours ();
6186 async_enable_stdin ();
6187
6188 /* Set the current source location. This will also happen if we
6189 display the frame below, but the current SAL will be incorrect
6190 during a user hook-stop function. */
6191 if (has_stack_frames () && !stop_stack_dummy)
6192 set_current_sal_from_frame (get_current_frame ());
6193
6194 /* Let the user/frontend see the threads as stopped, but do nothing
6195 if the thread was running an infcall. We may be e.g., evaluating
6196 a breakpoint condition. In that case, the thread had state
6197 THREAD_RUNNING before the infcall, and shall remain set to
6198 running, all without informing the user/frontend about state
6199 transition changes. If this is actually a call command, then the
6200 thread was originally already stopped, so there's no state to
6201 finish either. */
6202 if (target_has_execution && inferior_thread ()->control.in_infcall)
6203 discard_cleanups (old_chain);
6204 else
6205 do_cleanups (old_chain);
6206
6207 /* Look up the hook_stop and run it (CLI internally handles problem
6208 of stop_command's pre-hook not existing). */
6209 if (stop_command)
6210 catch_errors (hook_stop_stub, stop_command,
6211 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6212
6213 if (!has_stack_frames ())
6214 goto done;
6215
6216 if (last.kind == TARGET_WAITKIND_SIGNALLED
6217 || last.kind == TARGET_WAITKIND_EXITED)
6218 goto done;
6219
6220 /* Select innermost stack frame - i.e., current frame is frame 0,
6221 and current location is based on that.
6222 Don't do this on return from a stack dummy routine,
6223 or if the program has exited. */
6224
6225 if (!stop_stack_dummy)
6226 {
6227 select_frame (get_current_frame ());
6228
6229 /* If --batch-silent is enabled then there's no need to print the current
6230 	 source location, and trying to do so risks causing an error message about
6231 missing source files. */
6232 if (stop_print_frame && !batch_silent)
6233 print_stop_event (&last);
6234 }
6235
6236 /* Save the function value return registers, if we care.
6237 We might be about to restore their previous contents. */
6238 if (inferior_thread ()->control.proceed_to_finish
6239 && execution_direction != EXEC_REVERSE)
6240 {
6241 /* This should not be necessary. */
6242 if (stop_registers)
6243 regcache_xfree (stop_registers);
6244
6245 /* NB: The copy goes through to the target picking up the value of
6246 all the registers. */
6247 stop_registers = regcache_dup (get_current_regcache ());
6248 }
6249
6250 if (stop_stack_dummy == STOP_STACK_DUMMY)
6251 {
6252 /* Pop the empty frame that contains the stack dummy.
6253 This also restores inferior state prior to the call
6254 (struct infcall_suspend_state). */
6255 struct frame_info *frame = get_current_frame ();
6256
6257 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6258 frame_pop (frame);
6259 /* frame_pop() calls reinit_frame_cache as the last thing it
6260 does which means there's currently no selected frame. We
6261 don't need to re-establish a selected frame if the dummy call
6262 returns normally, that will be done by
6263 restore_infcall_control_state. However, we do have to handle
6264 the case where the dummy call is returning after being
6265 stopped (e.g. the dummy call previously hit a breakpoint).
6266 We can't know which case we have so just always re-establish
6267 a selected frame here. */
6268 select_frame (get_current_frame ());
6269 }
6270
6271 done:
6272 annotate_stopped ();
6273
6274 /* Suppress the stop observer if we're in the middle of:
6275
6276 	     - a step n (n > 1), as there are still more steps to be done.
6277
6278 - a "finish" command, as the observer will be called in
6279 finish_command_continuation, so it can include the inferior
6280 function's return value.
6281
6282 	     - calling an inferior function, as we pretend the inferior didn't
6283 run at all. The return value of the call is handled by the
6284 expression evaluator, through call_function_by_hand. */
6285
6286 if (!target_has_execution
6287 || last.kind == TARGET_WAITKIND_SIGNALLED
6288 || last.kind == TARGET_WAITKIND_EXITED
6289 || last.kind == TARGET_WAITKIND_NO_RESUMED
6290 || (!(inferior_thread ()->step_multi
6291 && inferior_thread ()->control.stop_step)
6292 && !(inferior_thread ()->control.stop_bpstat
6293 && inferior_thread ()->control.proceed_to_finish)
6294 && !inferior_thread ()->control.in_infcall))
6295 {
6296 if (!ptid_equal (inferior_ptid, null_ptid))
6297 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6298 stop_print_frame);
6299 else
6300 observer_notify_normal_stop (NULL, stop_print_frame);
6301 }
6302
6303 if (target_has_execution)
6304 {
6305 if (last.kind != TARGET_WAITKIND_SIGNALLED
6306 && last.kind != TARGET_WAITKIND_EXITED)
6307 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6308 Delete any breakpoint that is to be deleted at the next stop. */
6309 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6310 }
6311
6312 /* Try to get rid of automatically added inferiors that are no
6313 longer needed. Keeping those around slows down things linearly.
6314 Note that this never removes the current inferior. */
6315 prune_inferiors ();
6316 }
6317
6318 static int
6319 hook_stop_stub (void *cmd)
6320 {
6321 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6322 return (0);
6323 }
6324 \f
6325 int
6326 signal_stop_state (int signo)
6327 {
6328 return signal_stop[signo];
6329 }
6330
6331 int
6332 signal_print_state (int signo)
6333 {
6334 return signal_print[signo];
6335 }
6336
6337 int
6338 signal_pass_state (int signo)
6339 {
6340 return signal_program[signo];
6341 }
6342
6343 static void
6344 signal_cache_update (int signo)
6345 {
6346 if (signo == -1)
6347 {
6348 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6349 signal_cache_update (signo);
6350
6351 return;
6352 }
6353
6354 signal_pass[signo] = (signal_stop[signo] == 0
6355 && signal_print[signo] == 0
6356 && signal_program[signo] == 1
6357 && signal_catch[signo] == 0);
6358 }
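/* In other words, signal_pass[SIGNO] caches whether SIGNO may be
   delivered straight to the inferior without GDB intercepting it:
   true only when GDB neither stops on it, prints it, catches it, nor
   withholds it from the program.  */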
6359
6360 int
6361 signal_stop_update (int signo, int state)
6362 {
6363 int ret = signal_stop[signo];
6364
6365 signal_stop[signo] = state;
6366 signal_cache_update (signo);
6367 return ret;
6368 }
6369
6370 int
6371 signal_print_update (int signo, int state)
6372 {
6373 int ret = signal_print[signo];
6374
6375 signal_print[signo] = state;
6376 signal_cache_update (signo);
6377 return ret;
6378 }
6379
6380 int
6381 signal_pass_update (int signo, int state)
6382 {
6383 int ret = signal_program[signo];
6384
6385 signal_program[signo] = state;
6386 signal_cache_update (signo);
6387 return ret;
6388 }
6389
6390 /* Update the global 'signal_catch' from INFO and notify the
6391 target. */
6392
6393 void
6394 signal_catch_update (const unsigned int *info)
6395 {
6396 int i;
6397
6398 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6399 signal_catch[i] = info[i] > 0;
6400 signal_cache_update (-1);
6401 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6402 }
6403
6404 static void
6405 sig_print_header (void)
6406 {
6407 printf_filtered (_("Signal Stop\tPrint\tPass "
6408 "to program\tDescription\n"));
6409 }
6410
6411 static void
6412 sig_print_info (enum gdb_signal oursig)
6413 {
6414 const char *name = gdb_signal_to_name (oursig);
6415 int name_padding = 13 - strlen (name);
6416
6417 if (name_padding <= 0)
6418 name_padding = 0;
6419
6420 printf_filtered ("%s", name);
6421 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6422 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6423 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6424 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6425 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6426 }
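/* Illustrative output of sig_print_header/sig_print_info, e.g. with
   the default settings for SIGINT (columns are tab-separated, and the
   description text comes from gdb_signal_to_string):

     Signal        Stop      Print   Pass to program   Description
     SIGINT        Yes       Yes     No                Interrupt  */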
6427
6428 /* Specify how various signals in the inferior should be handled. */
6429
6430 static void
6431 handle_command (char *args, int from_tty)
6432 {
6433 char **argv;
6434 int digits, wordlen;
6435 int sigfirst, signum, siglast;
6436 enum gdb_signal oursig;
6437 int allsigs;
6438 int nsigs;
6439 unsigned char *sigs;
6440 struct cleanup *old_chain;
6441
6442 if (args == NULL)
6443 {
6444 error_no_arg (_("signal to handle"));
6445 }
6446
6447 /* Allocate and zero an array of flags for which signals to handle. */
6448
6449 nsigs = (int) GDB_SIGNAL_LAST;
6450 sigs = (unsigned char *) alloca (nsigs);
6451 memset (sigs, 0, nsigs);
6452
6453 /* Break the command line up into args. */
6454
6455 argv = gdb_buildargv (args);
6456 old_chain = make_cleanup_freeargv (argv);
6457
6458 	  /* Walk through the args, looking for signal numbers, signal names, and
6459 actions. Signal numbers and signal names may be interspersed with
6460 actions, with the actions being performed for all signals cumulatively
6461 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
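/* For example (illustrative invocations only):

       handle SIGUSR1 nostop noprint pass
       handle 5-9 stop print

   Each action word applies to all signals listed before it on the
   line.  */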
6462
6463 while (*argv != NULL)
6464 {
6465 wordlen = strlen (*argv);
6466 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6467 {;
6468 }
6469 allsigs = 0;
6470 sigfirst = siglast = -1;
6471
6472 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6473 {
6474 /* Apply action to all signals except those used by the
6475 debugger. Silently skip those. */
6476 allsigs = 1;
6477 sigfirst = 0;
6478 siglast = nsigs - 1;
6479 }
6480 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6481 {
6482 SET_SIGS (nsigs, sigs, signal_stop);
6483 SET_SIGS (nsigs, sigs, signal_print);
6484 }
6485 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6486 {
6487 UNSET_SIGS (nsigs, sigs, signal_program);
6488 }
6489 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6490 {
6491 SET_SIGS (nsigs, sigs, signal_print);
6492 }
6493 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6494 {
6495 SET_SIGS (nsigs, sigs, signal_program);
6496 }
6497 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6498 {
6499 UNSET_SIGS (nsigs, sigs, signal_stop);
6500 }
6501 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6502 {
6503 SET_SIGS (nsigs, sigs, signal_program);
6504 }
6505 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6506 {
6507 UNSET_SIGS (nsigs, sigs, signal_print);
6508 UNSET_SIGS (nsigs, sigs, signal_stop);
6509 }
6510 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6511 {
6512 UNSET_SIGS (nsigs, sigs, signal_program);
6513 }
6514 else if (digits > 0)
6515 {
6516 /* It is numeric. The numeric signal refers to our own
6517 	     internal signal numbering from target.h, not to the host/target
6518 signal number. This is a feature; users really should be
6519 using symbolic names anyway, and the common ones like
6520 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6521
6522 sigfirst = siglast = (int)
6523 gdb_signal_from_command (atoi (*argv));
6524 if ((*argv)[digits] == '-')
6525 {
6526 siglast = (int)
6527 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6528 }
6529 if (sigfirst > siglast)
6530 {
6531 /* Bet he didn't figure we'd think of this case... */
6532 signum = sigfirst;
6533 sigfirst = siglast;
6534 siglast = signum;
6535 }
6536 }
6537 else
6538 {
6539 oursig = gdb_signal_from_name (*argv);
6540 if (oursig != GDB_SIGNAL_UNKNOWN)
6541 {
6542 sigfirst = siglast = (int) oursig;
6543 }
6544 else
6545 {
6546 /* Not a number and not a recognized flag word => complain. */
6547 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6548 }
6549 }
6550
6551 /* If any signal numbers or symbol names were found, set flags for
6552 which signals to apply actions to. */
6553
6554 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6555 {
6556 switch ((enum gdb_signal) signum)
6557 {
6558 case GDB_SIGNAL_TRAP:
6559 case GDB_SIGNAL_INT:
6560 if (!allsigs && !sigs[signum])
6561 {
6562 if (query (_("%s is used by the debugger.\n\
6563 Are you sure you want to change it? "),
6564 gdb_signal_to_name ((enum gdb_signal) signum)))
6565 {
6566 sigs[signum] = 1;
6567 }
6568 else
6569 {
6570 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6571 gdb_flush (gdb_stdout);
6572 }
6573 }
6574 break;
6575 case GDB_SIGNAL_0:
6576 case GDB_SIGNAL_DEFAULT:
6577 case GDB_SIGNAL_UNKNOWN:
6578 /* Make sure that "all" doesn't print these. */
6579 break;
6580 default:
6581 sigs[signum] = 1;
6582 break;
6583 }
6584 }
6585
6586 argv++;
6587 }
6588
6589 for (signum = 0; signum < nsigs; signum++)
6590 if (sigs[signum])
6591 {
6592 signal_cache_update (-1);
6593 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6594 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6595
6596 if (from_tty)
6597 {
6598 /* Show the results. */
6599 sig_print_header ();
6600 for (; signum < nsigs; signum++)
6601 if (sigs[signum])
6602 sig_print_info (signum);
6603 }
6604
6605 break;
6606 }
6607
6608 do_cleanups (old_chain);
6609 }
6610
6611 /* Complete the "handle" command. */
6612
6613 static VEC (char_ptr) *
6614 handle_completer (struct cmd_list_element *ignore,
6615 const char *text, const char *word)
6616 {
6617 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6618 static const char * const keywords[] =
6619 {
6620 "all",
6621 "stop",
6622 "ignore",
6623 "print",
6624 "pass",
6625 "nostop",
6626 "noignore",
6627 "noprint",
6628 "nopass",
6629 NULL,
6630 };
6631
6632 vec_signals = signal_completer (ignore, text, word);
6633 vec_keywords = complete_on_enum (keywords, word, word);
6634
6635 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6636 VEC_free (char_ptr, vec_signals);
6637 VEC_free (char_ptr, vec_keywords);
6638 return return_val;
6639 }
6640
6641 static void
6642 xdb_handle_command (char *args, int from_tty)
6643 {
6644 char **argv;
6645 struct cleanup *old_chain;
6646
6647 if (args == NULL)
6648 error_no_arg (_("xdb command"));
6649
6650 /* Break the command line up into args. */
6651
6652 argv = gdb_buildargv (args);
6653 old_chain = make_cleanup_freeargv (argv);
6654 if (argv[1] != (char *) NULL)
6655 {
6656 char *argBuf;
6657 int bufLen;
6658
6659 bufLen = strlen (argv[0]) + 20;
6660 argBuf = (char *) xmalloc (bufLen);
6661 if (argBuf)
6662 {
6663 int validFlag = 1;
6664 enum gdb_signal oursig;
6665
6666 oursig = gdb_signal_from_name (argv[0]);
6667 memset (argBuf, 0, bufLen);
6668 if (strcmp (argv[1], "Q") == 0)
6669 sprintf (argBuf, "%s %s", argv[0], "noprint");
6670 else
6671 {
6672 if (strcmp (argv[1], "s") == 0)
6673 {
6674 if (!signal_stop[oursig])
6675 sprintf (argBuf, "%s %s", argv[0], "stop");
6676 else
6677 sprintf (argBuf, "%s %s", argv[0], "nostop");
6678 }
6679 else if (strcmp (argv[1], "i") == 0)
6680 {
6681 if (!signal_program[oursig])
6682 sprintf (argBuf, "%s %s", argv[0], "pass");
6683 else
6684 sprintf (argBuf, "%s %s", argv[0], "nopass");
6685 }
6686 else if (strcmp (argv[1], "r") == 0)
6687 {
6688 if (!signal_print[oursig])
6689 sprintf (argBuf, "%s %s", argv[0], "print");
6690 else
6691 sprintf (argBuf, "%s %s", argv[0], "noprint");
6692 }
6693 else
6694 validFlag = 0;
6695 }
6696 if (validFlag)
6697 handle_command (argBuf, from_tty);
6698 else
6699 printf_filtered (_("Invalid signal handling flag.\n"));
6700 if (argBuf)
6701 xfree (argBuf);
6702 }
6703 }
6704 do_cleanups (old_chain);
6705 }
6706
6707 enum gdb_signal
6708 gdb_signal_from_command (int num)
6709 {
6710 if (num >= 1 && num <= 15)
6711 return (enum gdb_signal) num;
6712 error (_("Only signals 1-15 are valid as numeric signals.\n\
6713 Use \"info signals\" for a list of symbolic signals."));
6714 }
6715
6716 /* Print current contents of the tables set by the handle command.
6717 It is possible we should just be printing signals actually used
6718 by the current target (but for things to work right when switching
6719 targets, all signals should be in the signal tables). */
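/* A sketch of the expected invocations (illustrative, not captured from
   a real session):
     (gdb) info signals            -- print the whole table
     (gdb) info signals SIGSEGV    -- print only the SIGSEGV row
     (gdb) info signals 11         -- same row, via the numeric form  */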
6720
6721 static void
6722 signals_info (char *signum_exp, int from_tty)
6723 {
6724 enum gdb_signal oursig;
6725
6726 sig_print_header ();
6727
6728 if (signum_exp)
6729 {
6730 /* First see if this is a symbol name. */
6731 oursig = gdb_signal_from_name (signum_exp);
6732 if (oursig == GDB_SIGNAL_UNKNOWN)
6733 {
6734 /* No, try numeric. */
6735 oursig =
6736 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6737 }
6738 sig_print_info (oursig);
6739 return;
6740 }
6741
6742 printf_filtered ("\n");
6743 /* These ugly casts brought to you by the native VAX compiler. */
6744 for (oursig = GDB_SIGNAL_FIRST;
6745 (int) oursig < (int) GDB_SIGNAL_LAST;
6746 oursig = (enum gdb_signal) ((int) oursig + 1))
6747 {
6748 QUIT;
6749
6750 if (oursig != GDB_SIGNAL_UNKNOWN
6751 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6752 sig_print_info (oursig);
6753 }
6754
6755 printf_filtered (_("\nUse the \"handle\" command "
6756 "to change these tables.\n"));
6757 }
6758
6759 /* Check if it makes sense to read $_siginfo from the current thread
6760 at this point. If not, throw an error. */
6761
6762 static void
6763 validate_siginfo_access (void)
6764 {
6765 /* No current inferior, no siginfo. */
6766 if (ptid_equal (inferior_ptid, null_ptid))
6767 error (_("No thread selected."));
6768
6769 /* Don't try to read from a dead thread. */
6770 if (is_exited (inferior_ptid))
6771 error (_("The current thread has terminated."));
6772
6773 /* ... or from a spinning thread. */
6774 if (is_running (inferior_ptid))
6775 error (_("Selected thread is running."));
6776 }
6777
6778 /* The $_siginfo convenience variable is a bit special. We don't know
6779 for sure the type of the value until we actually have a chance to
6780 fetch the data. The type can change depending on gdbarch, so it is
6781 also dependent on which thread you have selected.  We handle this by:
6782
6783 1. making $_siginfo be an internalvar that creates a new value on
6784 access.
6785
6786 2. making the value of $_siginfo be an lval_computed value. */
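/* A rough sketch of what this enables at the CLI; the field names depend
   on the gdbarch-provided siginfo type and are only illustrative:

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr
     (gdb) set $_siginfo.si_signo = 5

   Reads are served by siginfo_value_read below and writes by
   siginfo_value_write.  */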
6787
6788 /* This function implements the lval_computed support for reading a
6789 $_siginfo value. */
6790
6791 static void
6792 siginfo_value_read (struct value *v)
6793 {
6794 LONGEST transferred;
6795
6796 validate_siginfo_access ();
6797
6798 transferred =
6799 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6800 NULL,
6801 value_contents_all_raw (v),
6802 value_offset (v),
6803 TYPE_LENGTH (value_type (v)));
6804
6805 if (transferred != TYPE_LENGTH (value_type (v)))
6806 error (_("Unable to read siginfo"));
6807 }
6808
6809 /* This function implements the lval_computed support for writing a
6810 $_siginfo value. */
6811
6812 static void
6813 siginfo_value_write (struct value *v, struct value *fromval)
6814 {
6815 LONGEST transferred;
6816
6817 validate_siginfo_access ();
6818
6819 transferred = target_write (&current_target,
6820 TARGET_OBJECT_SIGNAL_INFO,
6821 NULL,
6822 value_contents_all_raw (fromval),
6823 value_offset (v),
6824 TYPE_LENGTH (value_type (fromval)));
6825
6826 if (transferred != TYPE_LENGTH (value_type (fromval)))
6827 error (_("Unable to write siginfo"));
6828 }
6829
6830 static const struct lval_funcs siginfo_value_funcs =
6831 {
6832 siginfo_value_read,
6833 siginfo_value_write
6834 };
6835
6836 /* Return a new value with the correct type for the siginfo object of
6837 the current thread using architecture GDBARCH. Return a void value
6838 if there's no object available. */
6839
6840 static struct value *
6841 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
6842 void *ignore)
6843 {
6844 if (target_has_stack
6845 && !ptid_equal (inferior_ptid, null_ptid)
6846 && gdbarch_get_siginfo_type_p (gdbarch))
6847 {
6848 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6849
6850 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6851 }
6852
6853 return allocate_value (builtin_type (gdbarch)->builtin_void);
6854 }
6855
6856 \f
6857 /* infcall_suspend_state contains state about the program itself like its
6858 registers and any signal it received when it last stopped.
6859 This state must be restored regardless of how the inferior function call
6860 ends (either successfully, or after it hits a breakpoint or signal)
6861 if the program is to properly continue where it left off. */
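/* A sketch of the intended usage of this pair (infcall.c is the real
   consumer; details there may differ):

     struct infcall_suspend_state *state = save_infcall_suspend_state ();
     struct cleanup *chain
       = make_cleanup_restore_infcall_suspend_state (state);
     ... make the inferior function call ...
     do_cleanups (chain);   -- registers, stop_pc and signal state restored

   A caller that instead wants to keep the post-call state discards the
   cleanup and then calls discard_infcall_suspend_state.  */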
6862
6863 struct infcall_suspend_state
6864 {
6865 struct thread_suspend_state thread_suspend;
6866 #if 0 /* Currently unused and empty structures are not valid C. */
6867 struct inferior_suspend_state inferior_suspend;
6868 #endif
6869
6870 /* Other fields: */
6871 CORE_ADDR stop_pc;
6872 struct regcache *registers;
6873
6874 /* Format of SIGINFO_DATA or NULL if it is not present. */
6875 struct gdbarch *siginfo_gdbarch;
6876
6877 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
6878 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different gdbarch the
6879 content would be invalid. */
6880 gdb_byte *siginfo_data;
6881 };
6882
6883 struct infcall_suspend_state *
6884 save_infcall_suspend_state (void)
6885 {
6886 struct infcall_suspend_state *inf_state;
6887 struct thread_info *tp = inferior_thread ();
6888 #if 0
6889 struct inferior *inf = current_inferior ();
6890 #endif
6891 struct regcache *regcache = get_current_regcache ();
6892 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6893 gdb_byte *siginfo_data = NULL;
6894
6895 if (gdbarch_get_siginfo_type_p (gdbarch))
6896 {
6897 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6898 size_t len = TYPE_LENGTH (type);
6899 struct cleanup *back_to;
6900
6901 siginfo_data = xmalloc (len);
6902 back_to = make_cleanup (xfree, siginfo_data);
6903
6904 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6905 siginfo_data, 0, len) == len)
6906 discard_cleanups (back_to);
6907 else
6908 {
6909 /* Errors ignored. */
6910 do_cleanups (back_to);
6911 siginfo_data = NULL;
6912 }
6913 }
6914
6915 inf_state = XCNEW (struct infcall_suspend_state);
6916
6917 if (siginfo_data)
6918 {
6919 inf_state->siginfo_gdbarch = gdbarch;
6920 inf_state->siginfo_data = siginfo_data;
6921 }
6922
6923 inf_state->thread_suspend = tp->suspend;
6924 #if 0 /* Currently unused and empty structures are not valid C. */
6925 inf_state->inferior_suspend = inf->suspend;
6926 #endif
6927
6928 /* run_inferior_call will not use the signal due to its `proceed' call with
6929 GDB_SIGNAL_0 anyway. */
6930 tp->suspend.stop_signal = GDB_SIGNAL_0;
6931
6932 inf_state->stop_pc = stop_pc;
6933
6934 inf_state->registers = regcache_dup (regcache);
6935
6936 return inf_state;
6937 }
6938
6939 /* Restore inferior session state to INF_STATE. */
6940
6941 void
6942 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6943 {
6944 struct thread_info *tp = inferior_thread ();
6945 #if 0
6946 struct inferior *inf = current_inferior ();
6947 #endif
6948 struct regcache *regcache = get_current_regcache ();
6949 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6950
6951 tp->suspend = inf_state->thread_suspend;
6952 #if 0 /* Currently unused and empty structures are not valid C. */
6953 inf->suspend = inf_state->inferior_suspend;
6954 #endif
6955
6956 stop_pc = inf_state->stop_pc;
6957
6958 if (inf_state->siginfo_gdbarch == gdbarch)
6959 {
6960 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6961
6962 /* Errors ignored. */
6963 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6964 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
6965 }
6966
6967 /* The inferior can be gone if the user types "print exit(0)"
6968 (and perhaps other times). */
6969 if (target_has_execution)
6970 /* NB: The register write goes through to the target. */
6971 regcache_cpy (regcache, inf_state->registers);
6972
6973 discard_infcall_suspend_state (inf_state);
6974 }
6975
6976 static void
6977 do_restore_infcall_suspend_state_cleanup (void *state)
6978 {
6979 restore_infcall_suspend_state (state);
6980 }
6981
6982 struct cleanup *
6983 make_cleanup_restore_infcall_suspend_state
6984 (struct infcall_suspend_state *inf_state)
6985 {
6986 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6987 }
6988
6989 void
6990 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6991 {
6992 regcache_xfree (inf_state->registers);
6993 xfree (inf_state->siginfo_data);
6994 xfree (inf_state);
6995 }
6996
6997 struct regcache *
6998 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6999 {
7000 return inf_state->registers;
7001 }
7002
7003 /* infcall_control_state contains state regarding gdb's control of the
7004 inferior itself like stepping control. It also contains session state like
7005 the user's currently selected frame. */
7006
7007 struct infcall_control_state
7008 {
7009 struct thread_control_state thread_control;
7010 struct inferior_control_state inferior_control;
7011
7012 /* Other fields: */
7013 enum stop_stack_kind stop_stack_dummy;
7014 int stopped_by_random_signal;
7015 int stop_after_trap;
7016
7017 /* ID of the selected frame when the inferior function call was made. */
7018 struct frame_id selected_frame_id;
7019 };
7020
7021 /* Save all of the information associated with the inferior<==>gdb
7022 connection. */
7023
7024 struct infcall_control_state *
7025 save_infcall_control_state (void)
7026 {
7027 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
7028 struct thread_info *tp = inferior_thread ();
7029 struct inferior *inf = current_inferior ();
7030
7031 inf_status->thread_control = tp->control;
7032 inf_status->inferior_control = inf->control;
7033
7034 tp->control.step_resume_breakpoint = NULL;
7035 tp->control.exception_resume_breakpoint = NULL;
7036
7037 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
7038 chain. If caller's caller is walking the chain, they'll be happier if we
7039 hand them back the original chain when restore_infcall_control_state is
7040 called. */
7041 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
7042
7043 /* Other fields: */
7044 inf_status->stop_stack_dummy = stop_stack_dummy;
7045 inf_status->stopped_by_random_signal = stopped_by_random_signal;
7046 inf_status->stop_after_trap = stop_after_trap;
7047
7048 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
7049
7050 return inf_status;
7051 }
7052
7053 static int
7054 restore_selected_frame (void *args)
7055 {
7056 struct frame_id *fid = (struct frame_id *) args;
7057 struct frame_info *frame;
7058
7059 frame = frame_find_by_id (*fid);
7060
7061 /* frame_find_by_id returns NULL if the previously selected frame
7062 no longer exists (or no frame had been selected). */
7063 if (frame == NULL)
7064 {
7065 warning (_("Unable to restore previously selected frame."));
7066 return 0;
7067 }
7068
7069 select_frame (frame);
7070
7071 return (1);
7072 }
7073
7074 /* Restore inferior session state to INF_STATUS. */
7075
7076 void
7077 restore_infcall_control_state (struct infcall_control_state *inf_status)
7078 {
7079 struct thread_info *tp = inferior_thread ();
7080 struct inferior *inf = current_inferior ();
7081
7082 if (tp->control.step_resume_breakpoint)
7083 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
7084
7085 if (tp->control.exception_resume_breakpoint)
7086 tp->control.exception_resume_breakpoint->disposition
7087 = disp_del_at_next_stop;
7088
7089 /* Handle the bpstat_copy of the chain. */
7090 bpstat_clear (&tp->control.stop_bpstat);
7091
7092 tp->control = inf_status->thread_control;
7093 inf->control = inf_status->inferior_control;
7094
7095 /* Other fields: */
7096 stop_stack_dummy = inf_status->stop_stack_dummy;
7097 stopped_by_random_signal = inf_status->stopped_by_random_signal;
7098 stop_after_trap = inf_status->stop_after_trap;
7099
7100 if (target_has_stack)
7101 {
7102 /* The point of catch_errors is that if the stack is clobbered,
7103 walking the stack might encounter a garbage pointer and
7104 error() trying to dereference it. */
7105 if (catch_errors
7106 (restore_selected_frame, &inf_status->selected_frame_id,
7107 "Unable to restore previously selected frame:\n",
7108 RETURN_MASK_ERROR) == 0)
7109 /* Error in restoring the selected frame. Select the innermost
7110 frame. */
7111 select_frame (get_current_frame ());
7112 }
7113
7114 xfree (inf_status);
7115 }
7116
7117 static void
7118 do_restore_infcall_control_state_cleanup (void *sts)
7119 {
7120 restore_infcall_control_state (sts);
7121 }
7122
7123 struct cleanup *
7124 make_cleanup_restore_infcall_control_state
7125 (struct infcall_control_state *inf_status)
7126 {
7127 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7128 }
7129
7130 void
7131 discard_infcall_control_state (struct infcall_control_state *inf_status)
7132 {
7133 if (inf_status->thread_control.step_resume_breakpoint)
7134 inf_status->thread_control.step_resume_breakpoint->disposition
7135 = disp_del_at_next_stop;
7136
7137 if (inf_status->thread_control.exception_resume_breakpoint)
7138 inf_status->thread_control.exception_resume_breakpoint->disposition
7139 = disp_del_at_next_stop;
7140
7141 /* See save_infcall_control_state for info on stop_bpstat. */
7142 bpstat_clear (&inf_status->thread_control.stop_bpstat);
7143
7144 xfree (inf_status);
7145 }
7146 \f
7147 /* restore_inferior_ptid() will be used by the cleanup machinery
7148 to restore the inferior_ptid value saved in a call to
7149 save_inferior_ptid(). */
7150
7151 static void
7152 restore_inferior_ptid (void *arg)
7153 {
7154 ptid_t *saved_ptid_ptr = arg;
7155
7156 inferior_ptid = *saved_ptid_ptr;
7157 xfree (arg);
7158 }
7159
7160 /* Save the value of inferior_ptid so that it may be restored by a
7161 later call to do_cleanups(). Returns the struct cleanup pointer
7162 needed for later doing the cleanup. */
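/* Typical pattern (a sketch; SOME_OTHER_PTID stands for whatever thread
   the caller needs to operate on temporarily):

     old_chain = save_inferior_ptid ();
     inferior_ptid = SOME_OTHER_PTID;
     ... do the work with that thread selected ...
     do_cleanups (old_chain);   -- inferior_ptid restored, saved copy freed.  */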
7163
7164 struct cleanup *
7165 save_inferior_ptid (void)
7166 {
7167 ptid_t *saved_ptid_ptr;
7168
7169 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7170 *saved_ptid_ptr = inferior_ptid;
7171 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7172 }
7173
7174 /* See inferior.h. */
7175
7176 void
7177 clear_exit_convenience_vars (void)
7178 {
7179 clear_internalvar (lookup_internalvar ("_exitsignal"));
7180 clear_internalvar (lookup_internalvar ("_exitcode"));
7181 }
7182 \f
7183
7184 /* User interface for reverse debugging:
7185 Set exec-direction / show exec-direction commands
7186 (the setter errors out unless the target can execute in reverse). */
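/* For example (a sketch, assuming a target that can execute in reverse,
   such as the full process record target):

     (gdb) record
     (gdb) continue
     (gdb) set exec-direction reverse
     (gdb) continue                 -- execution now runs backwards
     (gdb) set exec-direction forward  */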
7187
7188 int execution_direction = EXEC_FORWARD;
7189 static const char exec_forward[] = "forward";
7190 static const char exec_reverse[] = "reverse";
7191 static const char *exec_direction = exec_forward;
7192 static const char *const exec_direction_names[] = {
7193 exec_forward,
7194 exec_reverse,
7195 NULL
7196 };
7197
7198 static void
7199 set_exec_direction_func (char *args, int from_tty,
7200 struct cmd_list_element *cmd)
7201 {
7202 if (target_can_execute_reverse)
7203 {
7204 if (!strcmp (exec_direction, exec_forward))
7205 execution_direction = EXEC_FORWARD;
7206 else if (!strcmp (exec_direction, exec_reverse))
7207 execution_direction = EXEC_REVERSE;
7208 }
7209 else
7210 {
7211 exec_direction = exec_forward;
7212 error (_("Target does not support this operation."));
7213 }
7214 }
7215
7216 static void
7217 show_exec_direction_func (struct ui_file *out, int from_tty,
7218 struct cmd_list_element *cmd, const char *value)
7219 {
7220 switch (execution_direction) {
7221 case EXEC_FORWARD:
7222 fprintf_filtered (out, _("Forward.\n"));
7223 break;
7224 case EXEC_REVERSE:
7225 fprintf_filtered (out, _("Reverse.\n"));
7226 break;
7227 default:
7228 internal_error (__FILE__, __LINE__,
7229 _("bogus execution_direction value: %d"),
7230 (int) execution_direction);
7231 }
7232 }
7233
7234 static void
7235 show_schedule_multiple (struct ui_file *file, int from_tty,
7236 struct cmd_list_element *c, const char *value)
7237 {
7238 fprintf_filtered (file, _("Resuming the execution of threads "
7239 "of all processes is %s.\n"), value);
7240 }
7241
7242 /* Implementation of `siginfo' variable. */
7243
7244 static const struct internalvar_funcs siginfo_funcs =
7245 {
7246 siginfo_make_value,
7247 NULL,
7248 NULL
7249 };
7250
7251 void
7252 _initialize_infrun (void)
7253 {
7254 int i;
7255 int numsigs;
7256 struct cmd_list_element *c;
7257
7258 add_info ("signals", signals_info, _("\
7259 What debugger does when program gets various signals.\n\
7260 Specify a signal as argument to print info on that signal only."));
7261 add_info_alias ("handle", "signals", 0);
7262
7263 c = add_com ("handle", class_run, handle_command, _("\
7264 Specify how to handle signals.\n\
7265 Usage: handle SIGNAL [ACTIONS]\n\
7266 Args are signals and actions to apply to those signals.\n\
7267 If no actions are specified, the current settings for the specified signals\n\
7268 will be displayed instead.\n\
7269 \n\
7270 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7271 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7272 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7273 The special arg \"all\" is recognized to mean all signals except those\n\
7274 used by the debugger, typically SIGTRAP and SIGINT.\n\
7275 \n\
7276 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7277 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7278 Stop means reenter debugger if this signal happens (implies print).\n\
7279 Print means print a message if this signal happens.\n\
7280 Pass means let program see this signal; otherwise program doesn't know.\n\
7281 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7282 Pass and Stop may be combined.\n\
7283 \n\
7284 Multiple signals may be specified. Signal numbers and signal names\n\
7285 may be interspersed with actions, with the actions being performed for\n\
7286 all signals cumulatively specified."));
7287 set_cmd_completer (c, handle_completer);
7288
7289 if (xdb_commands)
7290 {
7291 add_com ("lz", class_info, signals_info, _("\
7292 What debugger does when program gets various signals.\n\
7293 Specify a signal as argument to print info on that signal only."));
7294 add_com ("z", class_run, xdb_handle_command, _("\
7295 Specify how to handle a signal.\n\
7296 Args are signals and actions to apply to those signals.\n\
7297 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7298 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7299 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7300 The special arg \"all\" is recognized to mean all signals except those\n\
7301 used by the debugger, typically SIGTRAP and SIGINT.\n\
7302 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7303 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7304 nopass), \"Q\" (noprint)\n\
7305 Stop means reenter debugger if this signal happens (implies print).\n\
7306 Print means print a message if this signal happens.\n\
7307 Pass means let program see this signal; otherwise program doesn't know.\n\
7308 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7309 Pass and Stop may be combined."));
7310 }
7311
7312 if (!dbx_commands)
7313 stop_command = add_cmd ("stop", class_obscure,
7314 not_just_help_class_command, _("\
7315 There is no `stop' command, but you can set a hook on `stop'.\n\
7316 This allows you to set a list of commands to be run each time execution\n\
7317 of the program stops."), &cmdlist);
7318
7319 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7320 Set inferior debugging."), _("\
7321 Show inferior debugging."), _("\
7322 When non-zero, inferior specific debugging is enabled."),
7323 NULL,
7324 show_debug_infrun,
7325 &setdebuglist, &showdebuglist);
7326
7327 add_setshow_boolean_cmd ("displaced", class_maintenance,
7328 &debug_displaced, _("\
7329 Set displaced stepping debugging."), _("\
7330 Show displaced stepping debugging."), _("\
7331 When non-zero, displaced stepping specific debugging is enabled."),
7332 NULL,
7333 show_debug_displaced,
7334 &setdebuglist, &showdebuglist);
7335
7336 add_setshow_boolean_cmd ("non-stop", no_class,
7337 &non_stop_1, _("\
7338 Set whether gdb controls the inferior in non-stop mode."), _("\
7339 Show whether gdb controls the inferior in non-stop mode."), _("\
7340 When debugging a multi-threaded program and this setting is\n\
7341 off (the default, also called all-stop mode), when one thread stops\n\
7342 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7343 all other threads in the program while you interact with the thread of\n\
7344 interest. When you continue or step a thread, you can allow the other\n\
7345 threads to run, or have them remain stopped, but while you inspect any\n\
7346 thread's state, all threads stop.\n\
7347 \n\
7348 In non-stop mode, when one thread stops, other threads can continue\n\
7349 to run freely. You'll be able to step each thread independently,\n\
7350 leave it stopped or free to run as needed."),
7351 set_non_stop,
7352 show_non_stop,
7353 &setlist,
7354 &showlist);
7355
7356 numsigs = (int) GDB_SIGNAL_LAST;
7357 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7358 signal_print = (unsigned char *)
7359 xmalloc (sizeof (signal_print[0]) * numsigs);
7360 signal_program = (unsigned char *)
7361 xmalloc (sizeof (signal_program[0]) * numsigs);
7362 signal_catch = (unsigned char *)
7363 xmalloc (sizeof (signal_catch[0]) * numsigs);
7364 signal_pass = (unsigned char *)
7365 xmalloc (sizeof (signal_pass[0]) * numsigs);
7366 for (i = 0; i < numsigs; i++)
7367 {
7368 signal_stop[i] = 1;
7369 signal_print[i] = 1;
7370 signal_program[i] = 1;
7371 signal_catch[i] = 0;
7372 }
7373
7374 /* Signals caused by debugger's own actions
7375 should not be given to the program afterwards. */
7376 signal_program[GDB_SIGNAL_TRAP] = 0;
7377 signal_program[GDB_SIGNAL_INT] = 0;
7378
7379 /* Signals that are not errors should not normally enter the debugger. */
7380 signal_stop[GDB_SIGNAL_ALRM] = 0;
7381 signal_print[GDB_SIGNAL_ALRM] = 0;
7382 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7383 signal_print[GDB_SIGNAL_VTALRM] = 0;
7384 signal_stop[GDB_SIGNAL_PROF] = 0;
7385 signal_print[GDB_SIGNAL_PROF] = 0;
7386 signal_stop[GDB_SIGNAL_CHLD] = 0;
7387 signal_print[GDB_SIGNAL_CHLD] = 0;
7388 signal_stop[GDB_SIGNAL_IO] = 0;
7389 signal_print[GDB_SIGNAL_IO] = 0;
7390 signal_stop[GDB_SIGNAL_POLL] = 0;
7391 signal_print[GDB_SIGNAL_POLL] = 0;
7392 signal_stop[GDB_SIGNAL_URG] = 0;
7393 signal_print[GDB_SIGNAL_URG] = 0;
7394 signal_stop[GDB_SIGNAL_WINCH] = 0;
7395 signal_print[GDB_SIGNAL_WINCH] = 0;
7396 signal_stop[GDB_SIGNAL_PRIO] = 0;
7397 signal_print[GDB_SIGNAL_PRIO] = 0;
7398
7399 /* These signals are used internally by user-level thread
7400 implementations. (See signal(5) on Solaris.) Like the above
7401 signals, a healthy program receives and handles them as part of
7402 its normal operation. */
7403 signal_stop[GDB_SIGNAL_LWP] = 0;
7404 signal_print[GDB_SIGNAL_LWP] = 0;
7405 signal_stop[GDB_SIGNAL_WAITING] = 0;
7406 signal_print[GDB_SIGNAL_WAITING] = 0;
7407 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7408 signal_print[GDB_SIGNAL_CANCEL] = 0;
7409
7410 /* Update cached state. */
7411 signal_cache_update (-1);
7412
7413 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7414 &stop_on_solib_events, _("\
7415 Set stopping for shared library events."), _("\
7416 Show stopping for shared library events."), _("\
7417 If nonzero, gdb will give control to the user when the dynamic linker\n\
7418 notifies gdb of shared library events. The most common event of interest\n\
7419 to the user would be loading/unloading of a new library."),
7420 set_stop_on_solib_events,
7421 show_stop_on_solib_events,
7422 &setlist, &showlist);
7423
7424 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7425 follow_fork_mode_kind_names,
7426 &follow_fork_mode_string, _("\
7427 Set debugger response to a program call of fork or vfork."), _("\
7428 Show debugger response to a program call of fork or vfork."), _("\
7429 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7430 parent - the original process is debugged after a fork\n\
7431 child - the new process is debugged after a fork\n\
7432 The unfollowed process will continue to run.\n\
7433 By default, the debugger will follow the parent process."),
7434 NULL,
7435 show_follow_fork_mode_string,
7436 &setlist, &showlist);
7437
7438 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7439 follow_exec_mode_names,
7440 &follow_exec_mode_string, _("\
7441 Set debugger response to a program call of exec."), _("\
7442 Show debugger response to a program call of exec."), _("\
7443 An exec call replaces the program image of a process.\n\
7444 \n\
7445 follow-exec-mode can be:\n\
7446 \n\
7447 new - the debugger creates a new inferior and rebinds the process\n\
7448 to this new inferior. The program the process was running before\n\
7449 the exec call can be restarted afterwards by restarting the original\n\
7450 inferior.\n\
7451 \n\
7452 same - the debugger keeps the process bound to the same inferior.\n\
7453 The new executable image replaces the previous executable loaded in\n\
7454 the inferior. Restarting the inferior after the exec call restarts\n\
7455 the executable the process was running after the exec call.\n\
7456 \n\
7457 By default, the debugger will use the same inferior."),
7458 NULL,
7459 show_follow_exec_mode_string,
7460 &setlist, &showlist);
7461
7462 add_setshow_enum_cmd ("scheduler-locking", class_run,
7463 scheduler_enums, &scheduler_mode, _("\
7464 Set mode for locking scheduler during execution."), _("\
7465 Show mode for locking scheduler during execution."), _("\
7466 off == no locking (threads may preempt at any time)\n\
7467 on == full locking (no thread except the current thread may run)\n\
7468 step == scheduler locked during every single-step operation.\n\
7469 In this mode, no other thread may run during a step command.\n\
7470 Other threads may run while stepping over a function call ('next')."),
7471 set_schedlock_func, /* traps on target vector */
7472 show_scheduler_mode,
7473 &setlist, &showlist);
7474
7475 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7476 Set mode for resuming threads of all processes."), _("\
7477 Show mode for resuming threads of all processes."), _("\
7478 When on, execution commands (such as 'continue' or 'next') resume all\n\
7479 threads of all processes. When off (which is the default), execution\n\
7480 commands only resume the threads of the current process. The set of\n\
7481 threads that are resumed is further refined by the scheduler-locking\n\
7482 mode (see help set scheduler-locking)."),
7483 NULL,
7484 show_schedule_multiple,
7485 &setlist, &showlist);
7486
7487 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7488 Set mode of the step operation."), _("\
7489 Show mode of the step operation."), _("\
7490 When set, doing a step over a function without debug line information\n\
7491 will stop at the first instruction of that function. Otherwise, the\n\
7492 function is skipped and the step command stops at a different source line."),
7493 NULL,
7494 show_step_stop_if_no_debug,
7495 &setlist, &showlist);
7496
7497 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7498 &can_use_displaced_stepping, _("\
7499 Set debugger's willingness to use displaced stepping."), _("\
7500 Show debugger's willingness to use displaced stepping."), _("\
7501 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7502 supported by the target architecture. If off, gdb will not use displaced\n\
7503 stepping to step over breakpoints, even if such is supported by the target\n\
7504 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7505 if the target architecture supports it and non-stop mode is active, but will not\n\
7506 use it in all-stop mode (see help set non-stop)."),
7507 NULL,
7508 show_can_use_displaced_stepping,
7509 &setlist, &showlist);
7510
7511 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7512 &exec_direction, _("Set direction of execution.\n\
7513 Options are 'forward' or 'reverse'."),
7514 _("Show direction of execution (forward/reverse)."),
7515 _("Tells gdb whether to execute forward or backward."),
7516 set_exec_direction_func, show_exec_direction_func,
7517 &setlist, &showlist);
7518
7519 /* Set/show detach-on-fork: user-settable mode. */
7520
7521 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7522 Set whether gdb will detach the child of a fork."), _("\
7523 Show whether gdb will detach the child of a fork."), _("\
7524 Tells gdb whether to detach the child of a fork."),
7525 NULL, NULL, &setlist, &showlist);
7526
7527 /* Set/show disable address space randomization mode. */
7528
7529 add_setshow_boolean_cmd ("disable-randomization", class_support,
7530 &disable_randomization, _("\
7531 Set disabling of debuggee's virtual address space randomization."), _("\
7532 Show disabling of debuggee's virtual address space randomization."), _("\
7533 When this mode is on (which is the default), randomization of the virtual\n\
7534 address space is disabled. Standalone programs run with the randomization\n\
7535 enabled by default on some platforms."),
7536 &set_disable_randomization,
7537 &show_disable_randomization,
7538 &setlist, &showlist);
7539
7540 /* ptid initializations */
7541 inferior_ptid = null_ptid;
7542 target_last_wait_ptid = minus_one_ptid;
7543
7544 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7545 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7546 observer_attach_thread_exit (infrun_thread_thread_exit);
7547 observer_attach_inferior_exit (infrun_inferior_exit);
7548
7549 /* Explicitly create without lookup, since that tries to create a
7550 value with a void typed value, and when we get here, gdbarch
7551 isn't initialized yet. At this point, we're quite sure there
7552 isn't another convenience variable of the same name. */
7553 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7554
7555 add_setshow_boolean_cmd ("observer", no_class,
7556 &observer_mode_1, _("\
7557 Set whether gdb controls the inferior in observer mode."), _("\
7558 Show whether gdb controls the inferior in observer mode."), _("\
7559 In observer mode, GDB can get data from the inferior, but not\n\
7560 affect its execution. Registers and memory may not be changed,\n\
7561 breakpoints may not be set, and the program cannot be interrupted\n\
7562 or signalled."),
7563 set_observer_mode,
7564 show_observer_mode,
7565 &setlist,
7566 &showlist);
7567 }