gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2013 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "gdb_string.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "exceptions.h"
28 #include "breakpoint.h"
29 #include "gdb_wait.h"
30 #include "gdbcore.h"
31 #include "gdbcmd.h"
32 #include "cli/cli-script.h"
33 #include "target.h"
34 #include "gdbthread.h"
35 #include "annotate.h"
36 #include "symfile.h"
37 #include "top.h"
38 #include <signal.h>
39 #include "inf-loop.h"
40 #include "regcache.h"
41 #include "value.h"
42 #include "observer.h"
43 #include "language.h"
44 #include "solib.h"
45 #include "main.h"
46 #include "dictionary.h"
47 #include "block.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "record-full.h"
53 #include "inline-frame.h"
54 #include "jit.h"
55 #include "tracepoint.h"
56 #include "continuations.h"
57 #include "interps.h"
58 #include "skip.h"
59 #include "probe.h"
60 #include "objfiles.h"
61 #include "completer.h"
62 #include "target-descriptions.h"
63
64 /* Prototypes for local functions */
65
66 static void signals_info (char *, int);
67
68 static void handle_command (char *, int);
69
70 static void sig_print_info (enum gdb_signal);
71
72 static void sig_print_header (void);
73
74 static void resume_cleanups (void *);
75
76 static int hook_stop_stub (void *);
77
78 static int restore_selected_frame (void *);
79
80 static int follow_fork (void);
81
82 static void set_schedlock_func (char *args, int from_tty,
83 struct cmd_list_element *c);
84
85 static int currently_stepping (struct thread_info *tp);
86
87 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
88 void *data);
89
90 static void xdb_handle_command (char *args, int from_tty);
91
92 static int prepare_to_proceed (int);
93
94 static void print_exited_reason (int exitstatus);
95
96 static void print_signal_exited_reason (enum gdb_signal siggnal);
97
98 static void print_no_history_reason (void);
99
100 static void print_signal_received_reason (enum gdb_signal siggnal);
101
102 static void print_end_stepping_range_reason (void);
103
104 void _initialize_infrun (void);
105
106 void nullify_last_target_wait_ptid (void);
107
108 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
109
110 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
111
112 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
113
114 /* When set, stop the 'step' command if we enter a function which has
115 no line number information. The normal behavior is that we step
116 over such functions. */
117 int step_stop_if_no_debug = 0;
118 static void
119 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
120 struct cmd_list_element *c, const char *value)
121 {
122 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
123 }
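/* In user-visible terms this flag is the "step-mode" setting
   (registered in _initialize_infrun, further down in this file);
   roughly:

     (gdb) set step-mode on    -- "step" stops on entry to functions
                                  that have no line number information,
                                  instead of stepping over them
     (gdb) show step-mode      -- printed by the routine above  */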
124
125 /* In asynchronous mode, but simulating synchronous execution. */
126
127 int sync_execution = 0;
128
129 /* wait_for_inferior and normal_stop use this to notify the user
130 when the inferior stopped in a different thread than it had been
131 running in. */
132
133 static ptid_t previous_inferior_ptid;
134
135 /* Default behavior is to detach newly forked processes (legacy). */
136 int detach_fork = 1;
137
138 int debug_displaced = 0;
139 static void
140 show_debug_displaced (struct ui_file *file, int from_tty,
141 struct cmd_list_element *c, const char *value)
142 {
143 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
144 }
145
146 unsigned int debug_infrun = 0;
147 static void
148 show_debug_infrun (struct ui_file *file, int from_tty,
149 struct cmd_list_element *c, const char *value)
150 {
151 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
152 }
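/* Usage sketch for the two debug flags above (both live under
   "set debug", registered in _initialize_infrun):

     (gdb) set debug displaced on   -- trace displaced-stepping activity
     (gdb) set debug infrun 1       -- trace run-control decisions  */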
153
154
155 /* Support for disabling address space randomization. */
156
157 int disable_randomization = 1;
158
159 static void
160 show_disable_randomization (struct ui_file *file, int from_tty,
161 struct cmd_list_element *c, const char *value)
162 {
163 if (target_supports_disable_randomization ())
164 fprintf_filtered (file,
165 _("Disabling randomization of debuggee's "
166 "virtual address space is %s.\n"),
167 value);
168 else
169 fputs_filtered (_("Disabling randomization of debuggee's "
170 "virtual address space is unsupported on\n"
171 "this platform.\n"), file);
172 }
173
174 static void
175 set_disable_randomization (char *args, int from_tty,
176 struct cmd_list_element *c)
177 {
178 if (!target_supports_disable_randomization ())
179 error (_("Disabling randomization of debuggee's "
180 "virtual address space is unsupported on\n"
181 "this platform."));
182 }
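/* The pair of routines above back the "disable-randomization"
   setting; roughly:

     (gdb) set disable-randomization off  -- let the OS randomize the
                                             inferior's address space
     (gdb) show disable-randomization

   The default is on, matching the initialization of
   disable_randomization above.  */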
183
184 /* "Observer mode" is somewhat like a more extreme version of
185 non-stop, in which all GDB operations that might affect the
186 target's execution have been disabled. */
187
188 static int non_stop_1 = 0;
189
190 int observer_mode = 0;
191 static int observer_mode_1 = 0;
192
193 static void
194 set_observer_mode (char *args, int from_tty,
195 struct cmd_list_element *c)
196 {
197 extern int pagination_enabled;
198
199 if (target_has_execution)
200 {
201 observer_mode_1 = observer_mode;
202 error (_("Cannot change this setting while the inferior is running."));
203 }
204
205 observer_mode = observer_mode_1;
206
207 may_write_registers = !observer_mode;
208 may_write_memory = !observer_mode;
209 may_insert_breakpoints = !observer_mode;
210 may_insert_tracepoints = !observer_mode;
211 /* We can insert fast tracepoints in or out of observer mode,
212 but enable them if we're going into this mode. */
213 if (observer_mode)
214 may_insert_fast_tracepoints = 1;
215 may_stop = !observer_mode;
216 update_target_permissions ();
217
218 /* Going *into* observer mode we must force non-stop, then
219 going out we leave it that way. */
220 if (observer_mode)
221 {
222 target_async_permitted = 1;
223 pagination_enabled = 0;
224 non_stop = non_stop_1 = 1;
225 }
226
227 if (from_tty)
228 printf_filtered (_("Observer mode is now %s.\n"),
229 (observer_mode ? "on" : "off"));
230 }
231
232 static void
233 show_observer_mode (struct ui_file *file, int from_tty,
234 struct cmd_list_element *c, const char *value)
235 {
236 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
237 }
238
239 /* This updates the value of observer mode based on changes in
240 permissions. Note that we are deliberately ignoring the values of
241 may-write-registers and may-write-memory, since the user may have
242 reason to enable these during a session, for instance to turn on a
243 debugging-related global. */
244
245 void
246 update_observer_mode (void)
247 {
248 int newval;
249
250 newval = (!may_insert_breakpoints
251 && !may_insert_tracepoints
252 && may_insert_fast_tracepoints
253 && !may_stop
254 && non_stop);
255
256 /* Let the user know if things change. */
257 if (newval != observer_mode)
258 printf_filtered (_("Observer mode is now %s.\n"),
259 (newval ? "on" : "off"));
260
261 observer_mode = observer_mode_1 = newval;
262 }
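/* Putting set_observer_mode and update_observer_mode together, a
   typical session looks roughly like:

     (gdb) set observer on    -- forces non-stop and makes the target
                                 effectively read-only (no breakpoints,
                                 no stopping, no register/memory writes)
     (gdb) set observer off

   while toggling the individual permissions (e.g. "set
   may-insert-breakpoints off") feeds back into observer_mode via
   update_observer_mode.  */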
263
264 /* Tables of how to react to signals; the user sets them. */
265
266 static unsigned char *signal_stop;
267 static unsigned char *signal_print;
268 static unsigned char *signal_program;
269
270 /* Table of signals that are registered with "catch signal". A
271 non-zero entry indicates that the signal is caught by some "catch
272 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
273 signals. */
274 static unsigned char *signal_catch;
275
276 /* Table of signals that the target may silently handle.
277 This is automatically determined from the flags above,
278 and simply cached here. */
279 static unsigned char *signal_pass;
280
281 #define SET_SIGS(nsigs,sigs,flags) \
282 do { \
283 int signum = (nsigs); \
284 while (signum-- > 0) \
285 if ((sigs)[signum]) \
286 (flags)[signum] = 1; \
287 } while (0)
288
289 #define UNSET_SIGS(nsigs,sigs,flags) \
290 do { \
291 int signum = (nsigs); \
292 while (signum-- > 0) \
293 if ((sigs)[signum]) \
294 (flags)[signum] = 0; \
295 } while (0)
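/* A sketch of how these macros are meant to be used: SIGS is a map
   with one entry per GDB signal, flagging the signals the user named
   in a "handle" command, and FLAGS is one of the tables above.
   Marking those signals as "stop" (which implies "print") would look
   roughly like:

     int nsigs = (int) GDB_SIGNAL_LAST;
     unsigned char *sigs = alloca (nsigs);   -- 1 for each named signal

     SET_SIGS (nsigs, sigs, signal_stop);
     SET_SIGS (nsigs, sigs, signal_print);

   The real keyword handling lives in handle_command, further down in
   this file.  */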
296
297 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
298 this function is to avoid exporting `signal_program'. */
299
300 void
301 update_signals_program_target (void)
302 {
303 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
304 }
305
306 /* Value to pass to target_resume() to cause all threads to resume. */
307
308 #define RESUME_ALL minus_one_ptid
309
310 /* Command list pointer for the "stop" placeholder. */
311
312 static struct cmd_list_element *stop_command;
313
314 /* Function inferior was in as of last step command. */
315
316 static struct symbol *step_start_function;
317
318 /* Nonzero if we want to give control to the user when we're notified
319 of shared library events by the dynamic linker. */
320 int stop_on_solib_events;
321
322 /* Enable or disable optional shared library event breakpoints
323 as appropriate when the above flag is changed. */
324
325 static void
326 set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
327 {
328 update_solib_breakpoints ();
329 }
330
331 static void
332 show_stop_on_solib_events (struct ui_file *file, int from_tty,
333 struct cmd_list_element *c, const char *value)
334 {
335 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
336 value);
337 }
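/* Usage sketch for the flag above:

     (gdb) set stop-on-solib-events 1  -- stop whenever the dynamic
                                          linker reports load/unload
     (gdb) show stop-on-solib-events  */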
338
339 /* Nonzero means we are expecting a trace trap
340 and should stop the inferior and return silently when it happens. */
341
342 int stop_after_trap;
343
344 /* Save register contents here when executing a "finish" command or when we
345 are about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
346 Thus this contains the return value from the called function (assuming
347 values are returned in a register). */
348
349 struct regcache *stop_registers;
350
351 /* Nonzero after stop if current stack frame should be printed. */
352
353 static int stop_print_frame;
354
355 /* This is a cached copy of the pid/waitstatus of the last event
356 returned by target_wait()/deprecated_target_wait_hook(). This
357 information is returned by get_last_target_status(). */
358 static ptid_t target_last_wait_ptid;
359 static struct target_waitstatus target_last_waitstatus;
360
361 static void context_switch (ptid_t ptid);
362
363 void init_thread_stepping_state (struct thread_info *tss);
364
365 static void init_infwait_state (void);
366
367 static const char follow_fork_mode_child[] = "child";
368 static const char follow_fork_mode_parent[] = "parent";
369
370 static const char *const follow_fork_mode_kind_names[] = {
371 follow_fork_mode_child,
372 follow_fork_mode_parent,
373 NULL
374 };
375
376 static const char *follow_fork_mode_string = follow_fork_mode_parent;
377 static void
378 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
379 struct cmd_list_element *c, const char *value)
380 {
381 fprintf_filtered (file,
382 _("Debugger response to a program "
383 "call of fork or vfork is \"%s\".\n"),
384 value);
385 }
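/* The fork-following code below is driven by two user settings,
   roughly:

     (gdb) set follow-fork-mode child  -- selects between the strings
                                          declared just above
     (gdb) set detach-on-fork off      -- backed by the detach_fork
                                          flag near the top of this
                                          file.  */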
386 \f
387
388 /* Tell the target to follow the fork we're stopped at. Returns true
389 if the inferior should be resumed; false, if the target for some
390 reason decided it's best not to resume. */
391
392 static int
393 follow_fork (void)
394 {
395 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
396 int should_resume = 1;
397 struct thread_info *tp;
398
399 /* Copy user stepping state to the new inferior thread. FIXME: the
400 followed fork child thread should have a copy of most of the
401 parent thread structure's run control related fields, not just these.
402 Initialized to avoid "may be used uninitialized" warnings from gcc. */
403 struct breakpoint *step_resume_breakpoint = NULL;
404 struct breakpoint *exception_resume_breakpoint = NULL;
405 CORE_ADDR step_range_start = 0;
406 CORE_ADDR step_range_end = 0;
407 struct frame_id step_frame_id = { 0 };
408
409 if (!non_stop)
410 {
411 ptid_t wait_ptid;
412 struct target_waitstatus wait_status;
413
414 /* Get the last target status returned by target_wait(). */
415 get_last_target_status (&wait_ptid, &wait_status);
416
417 /* If not stopped at a fork event, then there's nothing else to
418 do. */
419 if (wait_status.kind != TARGET_WAITKIND_FORKED
420 && wait_status.kind != TARGET_WAITKIND_VFORKED)
421 return 1;
422
423 /* Check if we switched over from WAIT_PTID, since the event was
424 reported. */
425 if (!ptid_equal (wait_ptid, minus_one_ptid)
426 && !ptid_equal (inferior_ptid, wait_ptid))
427 {
428 /* We did. Switch back to WAIT_PTID thread, to tell the
429 target to follow it (in either direction). We'll
430 afterwards refuse to resume, and inform the user what
431 happened. */
432 switch_to_thread (wait_ptid);
433 should_resume = 0;
434 }
435 }
436
437 tp = inferior_thread ();
438
439 /* If there were any forks/vforks that were caught and are now to be
440 followed, then do so now. */
441 switch (tp->pending_follow.kind)
442 {
443 case TARGET_WAITKIND_FORKED:
444 case TARGET_WAITKIND_VFORKED:
445 {
446 ptid_t parent, child;
447
448 /* If the user did a next/step, etc, over a fork call,
449 preserve the stepping state in the fork child. */
450 if (follow_child && should_resume)
451 {
452 step_resume_breakpoint = clone_momentary_breakpoint
453 (tp->control.step_resume_breakpoint);
454 step_range_start = tp->control.step_range_start;
455 step_range_end = tp->control.step_range_end;
456 step_frame_id = tp->control.step_frame_id;
457 exception_resume_breakpoint
458 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
459
460 /* For now, delete the parent's sr breakpoint, otherwise,
461 parent/child sr breakpoints are considered duplicates,
462 and the child version will not be installed. Remove
463 this when the breakpoints module becomes aware of
464 inferiors and address spaces. */
465 delete_step_resume_breakpoint (tp);
466 tp->control.step_range_start = 0;
467 tp->control.step_range_end = 0;
468 tp->control.step_frame_id = null_frame_id;
469 delete_exception_resume_breakpoint (tp);
470 }
471
472 parent = inferior_ptid;
473 child = tp->pending_follow.value.related_pid;
474
475 /* Tell the target to do whatever is necessary to follow
476 either parent or child. */
477 if (target_follow_fork (follow_child))
478 {
479 /* Target refused to follow, or there's some other reason
480 we shouldn't resume. */
481 should_resume = 0;
482 }
483 else
484 {
485 /* This pending follow fork event is now handled, one way
486 or another. The previously selected thread may be gone
487 from the lists by now, but if it is still around, we need
488 to clear the pending follow request. */
489 tp = find_thread_ptid (parent);
490 if (tp)
491 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
492
493 /* This makes sure we don't try to apply the "Switched
494 over from WAIT_PID" logic above. */
495 nullify_last_target_wait_ptid ();
496
497 /* If we followed the child, switch to it... */
498 if (follow_child)
499 {
500 switch_to_thread (child);
501
502 /* ... and preserve the stepping state, in case the
503 user was stepping over the fork call. */
504 if (should_resume)
505 {
506 tp = inferior_thread ();
507 tp->control.step_resume_breakpoint
508 = step_resume_breakpoint;
509 tp->control.step_range_start = step_range_start;
510 tp->control.step_range_end = step_range_end;
511 tp->control.step_frame_id = step_frame_id;
512 tp->control.exception_resume_breakpoint
513 = exception_resume_breakpoint;
514 }
515 else
516 {
517 /* If we get here, it was because we're trying to
518 resume from a fork catchpoint, but, the user
519 has switched threads away from the thread that
520 forked. In that case, the resume command
521 issued is most likely not applicable to the
522 child, so just warn, and refuse to resume. */
523 warning (_("Not resuming: switched threads "
524 "before following fork child.\n"));
525 }
526
527 /* Reset breakpoints in the child as appropriate. */
528 follow_inferior_reset_breakpoints ();
529 }
530 else
531 switch_to_thread (parent);
532 }
533 }
534 break;
535 case TARGET_WAITKIND_SPURIOUS:
536 /* Nothing to follow. */
537 break;
538 default:
539 internal_error (__FILE__, __LINE__,
540 "Unexpected pending_follow.kind %d\n",
541 tp->pending_follow.kind);
542 break;
543 }
544
545 return should_resume;
546 }
547
548 void
549 follow_inferior_reset_breakpoints (void)
550 {
551 struct thread_info *tp = inferior_thread ();
552
553 /* Was there a step_resume breakpoint? (There was if the user
554 did a "next" at the fork() call.) If so, explicitly reset its
555 thread number.
556
557 step_resumes are a form of bp that are made to be per-thread.
558 Since we created the step_resume bp when the parent process
559 was being debugged, and now are switching to the child process,
560 from the breakpoint package's viewpoint, that's a switch of
561 "threads". We must update the bp's notion of which thread
562 it is for, or it'll be ignored when it triggers. */
563
564 if (tp->control.step_resume_breakpoint)
565 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
566
567 if (tp->control.exception_resume_breakpoint)
568 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
569
570 /* Reinsert all breakpoints in the child. The user may have set
571 breakpoints after catching the fork, in which case those
572 were never set in the child, but only in the parent. This makes
573 sure the inserted breakpoints match the breakpoint list. */
574
575 breakpoint_re_set ();
576 insert_breakpoints ();
577 }
578
579 /* The child has exited or execed: resume threads of the parent the
580 user wanted to be executing. */
581
582 static int
583 proceed_after_vfork_done (struct thread_info *thread,
584 void *arg)
585 {
586 int pid = * (int *) arg;
587
588 if (ptid_get_pid (thread->ptid) == pid
589 && is_running (thread->ptid)
590 && !is_executing (thread->ptid)
591 && !thread->stop_requested
592 && thread->suspend.stop_signal == GDB_SIGNAL_0)
593 {
594 if (debug_infrun)
595 fprintf_unfiltered (gdb_stdlog,
596 "infrun: resuming vfork parent thread %s\n",
597 target_pid_to_str (thread->ptid));
598
599 switch_to_thread (thread->ptid);
600 clear_proceed_status ();
601 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);
602 }
603
604 return 0;
605 }
606
607 /* Called whenever we notice an exec or exit event, to handle
608 detaching or resuming a vfork parent. */
609
610 static void
611 handle_vfork_child_exec_or_exit (int exec)
612 {
613 struct inferior *inf = current_inferior ();
614
615 if (inf->vfork_parent)
616 {
617 int resume_parent = -1;
618
619 /* This exec or exit marks the end of the shared memory region
620 between the parent and the child. If the user wanted to
621 detach from the parent, now is the time. */
622
623 if (inf->vfork_parent->pending_detach)
624 {
625 struct thread_info *tp;
626 struct cleanup *old_chain;
627 struct program_space *pspace;
628 struct address_space *aspace;
629
630 /* follow-fork child, detach-on-fork on. */
631
632 inf->vfork_parent->pending_detach = 0;
633
634 if (!exec)
635 {
636 /* If we're handling a child exit, then inferior_ptid
637 points at the inferior's pid, not to a thread. */
638 old_chain = save_inferior_ptid ();
639 save_current_program_space ();
640 save_current_inferior ();
641 }
642 else
643 old_chain = save_current_space_and_thread ();
644
645 /* We're letting go of the parent. */
646 tp = any_live_thread_of_process (inf->vfork_parent->pid);
647 switch_to_thread (tp->ptid);
648
649 /* We're about to detach from the parent, which implicitly
650 removes breakpoints from its address space. There's a
651 catch here: we want to reuse the spaces for the child,
652 but, parent/child are still sharing the pspace at this
653 point, although the exec in reality makes the kernel give
654 the child a fresh set of new pages. The problem here is
655 that the breakpoints module, being unaware of this, would
656 likely choose the child process to write to the parent
657 address space. Swapping the child temporarily away from
658 the spaces has the desired effect. Yes, this is "sort
659 of" a hack. */
660
661 pspace = inf->pspace;
662 aspace = inf->aspace;
663 inf->aspace = NULL;
664 inf->pspace = NULL;
665
666 if (debug_infrun || info_verbose)
667 {
668 target_terminal_ours ();
669
670 if (exec)
671 fprintf_filtered (gdb_stdlog,
672 "Detaching vfork parent process "
673 "%d after child exec.\n",
674 inf->vfork_parent->pid);
675 else
676 fprintf_filtered (gdb_stdlog,
677 "Detaching vfork parent process "
678 "%d after child exit.\n",
679 inf->vfork_parent->pid);
680 }
681
682 target_detach (NULL, 0);
683
684 /* Put it back. */
685 inf->pspace = pspace;
686 inf->aspace = aspace;
687
688 do_cleanups (old_chain);
689 }
690 else if (exec)
691 {
692 /* We're staying attached to the parent, so, really give the
693 child a new address space. */
694 inf->pspace = add_program_space (maybe_new_address_space ());
695 inf->aspace = inf->pspace->aspace;
696 inf->removable = 1;
697 set_current_program_space (inf->pspace);
698
699 resume_parent = inf->vfork_parent->pid;
700
701 /* Break the bonds. */
702 inf->vfork_parent->vfork_child = NULL;
703 }
704 else
705 {
706 struct cleanup *old_chain;
707 struct program_space *pspace;
708
709 /* If this is a vfork child exiting, then the pspace and
710 aspaces were shared with the parent. Since we're
711 reporting the process exit, we'll be mourning all that is
712 found in the address space, and switching to null_ptid,
713 preparing to start a new inferior. But, since we don't
714 want to clobber the parent's address/program spaces, we
715 go ahead and create a new one for this exiting
716 inferior. */
717
718 /* Switch to null_ptid, so that clone_program_space doesn't want
719 to read the selected frame of a dead process. */
720 old_chain = save_inferior_ptid ();
721 inferior_ptid = null_ptid;
722
723 /* This inferior is dead, so avoid giving the breakpoints
724 module the option to write through to it (cloning a
725 program space resets breakpoints). */
726 inf->aspace = NULL;
727 inf->pspace = NULL;
728 pspace = add_program_space (maybe_new_address_space ());
729 set_current_program_space (pspace);
730 inf->removable = 1;
731 inf->symfile_flags = SYMFILE_NO_READ;
732 clone_program_space (pspace, inf->vfork_parent->pspace);
733 inf->pspace = pspace;
734 inf->aspace = pspace->aspace;
735
736 /* Put back inferior_ptid. We'll continue mourning this
737 inferior. */
738 do_cleanups (old_chain);
739
740 resume_parent = inf->vfork_parent->pid;
741 /* Break the bonds. */
742 inf->vfork_parent->vfork_child = NULL;
743 }
744
745 inf->vfork_parent = NULL;
746
747 gdb_assert (current_program_space == inf->pspace);
748
749 if (non_stop && resume_parent != -1)
750 {
751 /* If the user wanted the parent to be running, let it go
752 free now. */
753 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
754
755 if (debug_infrun)
756 fprintf_unfiltered (gdb_stdlog,
757 "infrun: resuming vfork parent process %d\n",
758 resume_parent);
759
760 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
761
762 do_cleanups (old_chain);
763 }
764 }
765 }
766
767 /* Enum strings for "set|show follow-exec-mode". */
768
769 static const char follow_exec_mode_new[] = "new";
770 static const char follow_exec_mode_same[] = "same";
771 static const char *const follow_exec_mode_names[] =
772 {
773 follow_exec_mode_new,
774 follow_exec_mode_same,
775 NULL,
776 };
777
778 static const char *follow_exec_mode_string = follow_exec_mode_same;
779 static void
780 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
781 struct cmd_list_element *c, const char *value)
782 {
783 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
784 }
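/* Roughly, the user selects between the two modes above with:

     (gdb) set follow-exec-mode new   -- create a fresh inferior for
                                         the post-exec image
     (gdb) set follow-exec-mode same  -- reuse the current inferior
                                         (the default, per above)  */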
785
786 /* EXECD_PATHNAME is assumed to be non-NULL. */
787
788 static void
789 follow_exec (ptid_t pid, char *execd_pathname)
790 {
791 struct thread_info *th = inferior_thread ();
792 struct inferior *inf = current_inferior ();
793
794 /* This is an exec event that we actually wish to pay attention to.
795 Refresh our symbol table to the newly exec'd program, remove any
796 momentary bp's, etc.
797
798 If there are breakpoints, they aren't really inserted now,
799 since the exec() transformed our inferior into a fresh set
800 of instructions.
801
802 We want to preserve symbolic breakpoints on the list, since
803 we have hopes that they can be reset after the new a.out's
804 symbol table is read.
805
806 However, any "raw" breakpoints must be removed from the list
807 (e.g., the solib bp's), since their address is probably invalid
808 now.
809
810 And, we DON'T want to call delete_breakpoints() here, since
811 that may write the bp's "shadow contents" (the instruction
812 value that was overwritten with a TRAP instruction). Since
813 we now have a new a.out, those shadow contents aren't valid. */
814
815 mark_breakpoints_out ();
816
817 update_breakpoints_after_exec ();
818
819 /* If there was one, it's gone now. We cannot truly step-to-next
820 statement through an exec(). */
821 th->control.step_resume_breakpoint = NULL;
822 th->control.exception_resume_breakpoint = NULL;
823 th->control.step_range_start = 0;
824 th->control.step_range_end = 0;
825
826 /* The target reports the exec event to the main thread, even if
827 some other thread does the exec, and even if the main thread was
828 already stopped --- if debugging in non-stop mode, it's possible
829 the user had the main thread held stopped in the previous image
830 --- release it now. This is the same behavior as step-over-exec
831 with scheduler-locking on in all-stop mode. */
832 th->stop_requested = 0;
833
834 /* What is this a.out's name? */
835 printf_unfiltered (_("%s is executing new program: %s\n"),
836 target_pid_to_str (inferior_ptid),
837 execd_pathname);
838
839 /* We've followed the inferior through an exec. Therefore, the
840 inferior has essentially been killed & reborn. */
841
842 gdb_flush (gdb_stdout);
843
844 breakpoint_init_inferior (inf_execd);
845
846 if (gdb_sysroot && *gdb_sysroot)
847 {
848 char *name = alloca (strlen (gdb_sysroot)
849 + strlen (execd_pathname)
850 + 1);
851
852 strcpy (name, gdb_sysroot);
853 strcat (name, execd_pathname);
854 execd_pathname = name;
855 }
856
857 /* Reset the shared library package. This ensures that we get a
858 shlib event when the child reaches "_start", at which point the
859 dld will have had a chance to initialize the child. */
860 /* Also, loading a symbol file below may trigger symbol lookups, and
861 we don't want those to be satisfied by the libraries of the
862 previous incarnation of this process. */
863 no_shared_libraries (NULL, 0);
864
865 if (follow_exec_mode_string == follow_exec_mode_new)
866 {
867 struct program_space *pspace;
868
869 /* The user wants to keep the old inferior and program spaces
870 around. Create a new fresh one, and switch to it. */
871
872 inf = add_inferior (current_inferior ()->pid);
873 pspace = add_program_space (maybe_new_address_space ());
874 inf->pspace = pspace;
875 inf->aspace = pspace->aspace;
876
877 exit_inferior_num_silent (current_inferior ()->num);
878
879 set_current_inferior (inf);
880 set_current_program_space (pspace);
881 }
882 else
883 {
884 /* The old description may no longer be fit for the new image.
885 E.g., a 64-bit process exec'd a 32-bit process. Clear the
886 old description; we'll read a new one below. No need to do
887 this on "follow-exec-mode new", as the old inferior stays
888 around (its description is later cleared/refetched on
889 restart). */
890 target_clear_description ();
891 }
892
893 gdb_assert (current_program_space == inf->pspace);
894
895 /* That a.out is now the one to use. */
896 exec_file_attach (execd_pathname, 0);
897
898 /* SYMFILE_DEFER_BP_RESET is used so that the proper displacement for a PIE
899 (Position Independent Executable) main symbol file will get applied by
900 solib_create_inferior_hook below. breakpoint_re_set would fail to insert
901 the breakpoints with the zero displacement. */
902
903 symbol_file_add (execd_pathname,
904 (inf->symfile_flags
905 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
906 NULL, 0);
907
908 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
909 set_initial_language ();
910
911 /* If the target can specify a description, read it. Must do this
912 after flipping to the new executable (because the target supplied
913 description must be compatible with the executable's
914 architecture, and the old executable may e.g., be 32-bit, while
915 the new one 64-bit), and before anything involving memory or
916 registers. */
917 target_find_description ();
918
919 solib_create_inferior_hook (0);
920
921 jit_inferior_created_hook ();
922
923 breakpoint_re_set ();
924
925 /* Reinsert all breakpoints. (Those which were symbolic have
926 been reset to the proper address in the new a.out, thanks
927 to symbol_file_command...). */
928 insert_breakpoints ();
929
930 /* The next resume of this inferior should bring it to the shlib
931 startup breakpoints. (If the user had also set bp's on
932 "main" from the old (parent) process, then they'll auto-
933 matically get reset there in the new process.). */
934 }
935
936 /* Non-zero if we are just simulating a single-step. This is needed
937 because we cannot remove the breakpoints in the inferior process
938 until after the `wait' in `wait_for_inferior'. */
939 static int singlestep_breakpoints_inserted_p = 0;
940
941 /* The thread we inserted single-step breakpoints for. */
942 static ptid_t singlestep_ptid;
943
944 /* PC when we started this single-step. */
945 static CORE_ADDR singlestep_pc;
946
947 /* If another thread hit the singlestep breakpoint, we save the original
948 thread here so that we can resume single-stepping it later. */
949 static ptid_t saved_singlestep_ptid;
950 static int stepping_past_singlestep_breakpoint;
951
952 /* If not equal to null_ptid, this means that after stepping over a breakpoint
953 is finished, we need to switch to deferred_step_ptid and step it.
954
955 The use case is when one thread has hit a breakpoint, and then the user
956 has switched to another thread and issued 'step'. We need to step over
957 the breakpoint in the thread which hit it, but then continue
958 stepping the thread the user has selected. */
959 static ptid_t deferred_step_ptid;
960 \f
961 /* Displaced stepping. */
962
963 /* In non-stop debugging mode, we must take special care to manage
964 breakpoints properly; in particular, the traditional strategy for
965 stepping a thread past a breakpoint it has hit is unsuitable.
966 'Displaced stepping' is a tactic for stepping one thread past a
967 breakpoint it has hit while ensuring that other threads running
968 concurrently will hit the breakpoint as they should.
969
970 The traditional way to step a thread T off a breakpoint in a
971 multi-threaded program in all-stop mode is as follows:
972
973 a0) Initially, all threads are stopped, and breakpoints are not
974 inserted.
975 a1) We single-step T, leaving breakpoints uninserted.
976 a2) We insert breakpoints, and resume all threads.
977
978 In non-stop debugging, however, this strategy is unsuitable: we
979 don't want to have to stop all threads in the system in order to
980 continue or step T past a breakpoint. Instead, we use displaced
981 stepping:
982
983 n0) Initially, T is stopped, other threads are running, and
984 breakpoints are inserted.
985 n1) We copy the instruction "under" the breakpoint to a separate
986 location, outside the main code stream, making any adjustments
987 to the instruction, register, and memory state as directed by
988 T's architecture.
989 n2) We single-step T over the instruction at its new location.
990 n3) We adjust the resulting register and memory state as directed
991 by T's architecture. This includes resetting T's PC to point
992 back into the main instruction stream.
993 n4) We resume T.
994
995 This approach depends on the following gdbarch methods:
996
997 - gdbarch_max_insn_length and gdbarch_displaced_step_location
998 indicate where to copy the instruction, and how much space must
999 be reserved there. We use these in step n1.
1000
1001 - gdbarch_displaced_step_copy_insn copies an instruction to a new
1002 address, and makes any necessary adjustments to the instruction,
1003 register contents, and memory. We use this in step n1.
1004
1005 - gdbarch_displaced_step_fixup adjusts registers and memory after
1006 we have successfully single-stepped the instruction, to yield the
1007 same effect the instruction would have had if we had executed it
1008 at its original address. We use this in step n3.
1009
1010 - gdbarch_displaced_step_free_closure provides cleanup.
1011
1012 The gdbarch_displaced_step_copy_insn and
1013 gdbarch_displaced_step_fixup functions must be written so that
1014 copying an instruction with gdbarch_displaced_step_copy_insn,
1015 single-stepping across the copied instruction, and then applying
1016 gdbarch_displaced_step_fixup should have the same effects on the
1017 thread's memory and registers as stepping the instruction in place
1018 would have. Exactly which responsibilities fall to the copy and
1019 which fall to the fixup is up to the author of those functions.
1020
1021 See the comments in gdbarch.sh for details.
1022
1023 Note that displaced stepping and software single-step cannot
1024 currently be used in combination, although with some care I think
1025 they could be made to. Software single-step works by placing
1026 breakpoints on all possible subsequent instructions; if the
1027 displaced instruction is a PC-relative jump, those breakpoints
1028 could fall in very strange places --- on pages that aren't
1029 executable, or at addresses that are not proper instruction
1030 boundaries. (We do generally let other threads run while we wait
1031 to hit the software single-step breakpoint, and they might
1032 encounter such a corrupted instruction.) One way to work around
1033 this would be to have gdbarch_displaced_step_copy_insn fully
1034 simulate the effect of PC-relative instructions (and return NULL)
1035 on architectures that use software single-stepping.
1036
1037 In non-stop mode, we can have independent and simultaneous step
1038 requests, so more than one thread may need to simultaneously step
1039 over a breakpoint. The current implementation assumes there is
1040 only one scratch space per process. In this case, we have to
1041 serialize access to the scratch space. If thread A wants to step
1042 over a breakpoint, but we are currently waiting for some other
1043 thread to complete a displaced step, we leave thread A stopped and
1044 place it in the displaced_step_request_queue. Whenever a displaced
1045 step finishes, we pick the next thread in the queue and start a new
1046 displaced step operation on it. See displaced_step_prepare and
1047 displaced_step_fixup for details. */
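/* As a compact sketch, steps n1-n4 map onto the gdbarch hooks roughly
   as follows (the real logic, including error handling, queueing and
   restoring the scratch area, lives in displaced_step_prepare and
   displaced_step_fixup below):

     copy = gdbarch_displaced_step_location (gdbarch);
     len = gdbarch_max_insn_length (gdbarch);
     ... save the LEN bytes currently at COPY ...
     closure = gdbarch_displaced_step_copy_insn (gdbarch, original,
                                                 copy, regcache);
     regcache_write_pc (regcache, copy);                         -- n1
     target_resume (ptid, 1, GDB_SIGNAL_0);                      -- n2
     ... wait for the step to report back, then ...
     gdbarch_displaced_step_fixup (gdbarch, closure, original, copy,
                                   get_thread_regcache (ptid));  -- n3
     target_resume (ptid, 0, GDB_SIGNAL_0);                      -- n4  */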
1048
1049 struct displaced_step_request
1050 {
1051 ptid_t ptid;
1052 struct displaced_step_request *next;
1053 };
1054
1055 /* Per-inferior displaced stepping state. */
1056 struct displaced_step_inferior_state
1057 {
1058 /* Pointer to next in linked list. */
1059 struct displaced_step_inferior_state *next;
1060
1061 /* The process this displaced step state refers to. */
1062 int pid;
1063
1064 /* A queue of pending displaced stepping requests. One entry per
1065 thread that needs to do a displaced step. */
1066 struct displaced_step_request *step_request_queue;
1067
1068 /* If this is not null_ptid, this is the thread carrying out a
1069 displaced single-step in process PID. This thread's state will
1070 require fixing up once it has completed its step. */
1071 ptid_t step_ptid;
1072
1073 /* The architecture the thread had when we stepped it. */
1074 struct gdbarch *step_gdbarch;
1075
1076 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1077 for post-step cleanup. */
1078 struct displaced_step_closure *step_closure;
1079
1080 /* The address of the original instruction, and the copy we
1081 made. */
1082 CORE_ADDR step_original, step_copy;
1083
1084 /* Saved contents of copy area. */
1085 gdb_byte *step_saved_copy;
1086 };
1087
1088 /* The list of states of processes involved in displaced stepping
1089 presently. */
1090 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1091
1092 /* Get the displaced stepping state of process PID. */
1093
1094 static struct displaced_step_inferior_state *
1095 get_displaced_stepping_state (int pid)
1096 {
1097 struct displaced_step_inferior_state *state;
1098
1099 for (state = displaced_step_inferior_states;
1100 state != NULL;
1101 state = state->next)
1102 if (state->pid == pid)
1103 return state;
1104
1105 return NULL;
1106 }
1107
1108 /* Add a new displaced stepping state for process PID to the displaced
1109 stepping state list, or return a pointer to an already existing
1110 entry, if it already exists. Never returns NULL. */
1111
1112 static struct displaced_step_inferior_state *
1113 add_displaced_stepping_state (int pid)
1114 {
1115 struct displaced_step_inferior_state *state;
1116
1117 for (state = displaced_step_inferior_states;
1118 state != NULL;
1119 state = state->next)
1120 if (state->pid == pid)
1121 return state;
1122
1123 state = xcalloc (1, sizeof (*state));
1124 state->pid = pid;
1125 state->next = displaced_step_inferior_states;
1126 displaced_step_inferior_states = state;
1127
1128 return state;
1129 }
1130
1131 /* If the inferior is in displaced stepping, and ADDR equals the starting
1132 address of the copy area, return the corresponding displaced_step_closure.
1133 Otherwise, return NULL. */
1134
1135 struct displaced_step_closure*
1136 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1137 {
1138 struct displaced_step_inferior_state *displaced
1139 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1140
1141 /* If checking the mode of displaced instruction in copy area. */
1142 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1143 && (displaced->step_copy == addr))
1144 return displaced->step_closure;
1145
1146 return NULL;
1147 }
1148
1149 /* Remove the displaced stepping state of process PID. */
1150
1151 static void
1152 remove_displaced_stepping_state (int pid)
1153 {
1154 struct displaced_step_inferior_state *it, **prev_next_p;
1155
1156 gdb_assert (pid != 0);
1157
1158 it = displaced_step_inferior_states;
1159 prev_next_p = &displaced_step_inferior_states;
1160 while (it)
1161 {
1162 if (it->pid == pid)
1163 {
1164 *prev_next_p = it->next;
1165 xfree (it);
1166 return;
1167 }
1168
1169 prev_next_p = &it->next;
1170 it = *prev_next_p;
1171 }
1172 }
1173
1174 static void
1175 infrun_inferior_exit (struct inferior *inf)
1176 {
1177 remove_displaced_stepping_state (inf->pid);
1178 }
1179
1180 /* If ON, and the architecture supports it, GDB will use displaced
1181 stepping to step over breakpoints. If OFF, or if the architecture
1182 doesn't support it, GDB will instead use the traditional
1183 hold-and-step approach. If AUTO (which is the default), GDB will
1184 decide which technique to use to step over breakpoints depending on
1185 which of all-stop or non-stop mode is active --- displaced stepping
1186 in non-stop mode; hold-and-step in all-stop mode. */
1187
1188 static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1189
1190 static void
1191 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1192 struct cmd_list_element *c,
1193 const char *value)
1194 {
1195 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1196 fprintf_filtered (file,
1197 _("Debugger's willingness to use displaced stepping "
1198 "to step over breakpoints is %s (currently %s).\n"),
1199 value, non_stop ? "on" : "off");
1200 else
1201 fprintf_filtered (file,
1202 _("Debugger's willingness to use displaced stepping "
1203 "to step over breakpoints is %s.\n"), value);
1204 }
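/* The corresponding user command is, roughly:

     (gdb) set displaced-stepping auto  -- the default described above
     (gdb) set displaced-stepping on
     (gdb) set displaced-stepping off  */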
1205
1206 /* Return non-zero if displaced stepping can/should be used to step
1207 over breakpoints. */
1208
1209 static int
1210 use_displaced_stepping (struct gdbarch *gdbarch)
1211 {
1212 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1213 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1214 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1215 && !RECORD_IS_USED);
1216 }
1217
1218 /* Clean out any stray displaced stepping state. */
1219 static void
1220 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1221 {
1222 /* Indicate that there is no cleanup pending. */
1223 displaced->step_ptid = null_ptid;
1224
1225 if (displaced->step_closure)
1226 {
1227 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1228 displaced->step_closure);
1229 displaced->step_closure = NULL;
1230 }
1231 }
1232
1233 static void
1234 displaced_step_clear_cleanup (void *arg)
1235 {
1236 struct displaced_step_inferior_state *state = arg;
1237
1238 displaced_step_clear (state);
1239 }
1240
1241 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1242 void
1243 displaced_step_dump_bytes (struct ui_file *file,
1244 const gdb_byte *buf,
1245 size_t len)
1246 {
1247 int i;
1248
1249 for (i = 0; i < len; i++)
1250 fprintf_unfiltered (file, "%02x ", buf[i]);
1251 fputs_unfiltered ("\n", file);
1252 }
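/* For example, passing the four bytes 0x55 0x48 0x89 0xe5 to the
   routine above yields a single line of the form

     55 48 89 e5

   i.e. each byte as two hex digits followed by a space.  */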
1253
1254 /* Prepare to single-step, using displaced stepping.
1255
1256 Note that we cannot use displaced stepping when we have a signal to
1257 deliver. If we have a signal to deliver and an instruction to step
1258 over, then after the step, there will be no indication from the
1259 target whether the thread entered a signal handler or ignored the
1260 signal and stepped over the instruction successfully --- both cases
1261 result in a simple SIGTRAP. In the first case we mustn't do a
1262 fixup, and in the second case we must --- but we can't tell which.
1263 Comments in the code for 'random signals' in handle_inferior_event
1264 explain how we handle this case instead.
1265
1266 Returns 1 if preparing was successful -- this thread is going to be
1267 stepped now; or 0 if displaced stepping this thread got queued. */
1268 static int
1269 displaced_step_prepare (ptid_t ptid)
1270 {
1271 struct cleanup *old_cleanups, *ignore_cleanups;
1272 struct thread_info *tp = find_thread_ptid (ptid);
1273 struct regcache *regcache = get_thread_regcache (ptid);
1274 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1275 CORE_ADDR original, copy;
1276 ULONGEST len;
1277 struct displaced_step_closure *closure;
1278 struct displaced_step_inferior_state *displaced;
1279 int status;
1280
1281 /* We should never reach this function if the architecture does not
1282 support displaced stepping. */
1283 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1284
1285 /* Disable range stepping while executing in the scratch pad. We
1286 want a single-step even if executing the displaced instruction in
1287 the scratch buffer lands within the stepping range (e.g., a
1288 jump/branch). */
1289 tp->control.may_range_step = 0;
1290
1291 /* We have to displaced step one thread at a time, as we only have
1292 access to a single scratch space per inferior. */
1293
1294 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1295
1296 if (!ptid_equal (displaced->step_ptid, null_ptid))
1297 {
1298 /* Already waiting for a displaced step to finish. Defer this
1299 request and place it in the queue. */
1300 struct displaced_step_request *req, *new_req;
1301
1302 if (debug_displaced)
1303 fprintf_unfiltered (gdb_stdlog,
1304 "displaced: defering step of %s\n",
1305 target_pid_to_str (ptid));
1306
1307 new_req = xmalloc (sizeof (*new_req));
1308 new_req->ptid = ptid;
1309 new_req->next = NULL;
1310
1311 if (displaced->step_request_queue)
1312 {
1313 for (req = displaced->step_request_queue;
1314 req && req->next;
1315 req = req->next)
1316 ;
1317 req->next = new_req;
1318 }
1319 else
1320 displaced->step_request_queue = new_req;
1321
1322 return 0;
1323 }
1324 else
1325 {
1326 if (debug_displaced)
1327 fprintf_unfiltered (gdb_stdlog,
1328 "displaced: stepping %s now\n",
1329 target_pid_to_str (ptid));
1330 }
1331
1332 displaced_step_clear (displaced);
1333
1334 old_cleanups = save_inferior_ptid ();
1335 inferior_ptid = ptid;
1336
1337 original = regcache_read_pc (regcache);
1338
1339 copy = gdbarch_displaced_step_location (gdbarch);
1340 len = gdbarch_max_insn_length (gdbarch);
1341
1342 /* Save the original contents of the copy area. */
1343 displaced->step_saved_copy = xmalloc (len);
1344 ignore_cleanups = make_cleanup (free_current_contents,
1345 &displaced->step_saved_copy);
1346 status = target_read_memory (copy, displaced->step_saved_copy, len);
1347 if (status != 0)
1348 throw_error (MEMORY_ERROR,
1349 _("Error accessing memory address %s (%s) for "
1350 "displaced-stepping scratch space."),
1351 paddress (gdbarch, copy), safe_strerror (status));
1352 if (debug_displaced)
1353 {
1354 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1355 paddress (gdbarch, copy));
1356 displaced_step_dump_bytes (gdb_stdlog,
1357 displaced->step_saved_copy,
1358 len);
1359 };
1360
1361 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1362 original, copy, regcache);
1363
1364 /* We don't support the fully-simulated case at present. */
1365 gdb_assert (closure);
1366
1367 /* Save the information we need to fix things up if the step
1368 succeeds. */
1369 displaced->step_ptid = ptid;
1370 displaced->step_gdbarch = gdbarch;
1371 displaced->step_closure = closure;
1372 displaced->step_original = original;
1373 displaced->step_copy = copy;
1374
1375 make_cleanup (displaced_step_clear_cleanup, displaced);
1376
1377 /* Resume execution at the copy. */
1378 regcache_write_pc (regcache, copy);
1379
1380 discard_cleanups (ignore_cleanups);
1381
1382 do_cleanups (old_cleanups);
1383
1384 if (debug_displaced)
1385 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1386 paddress (gdbarch, copy));
1387
1388 return 1;
1389 }
1390
1391 static void
1392 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1393 const gdb_byte *myaddr, int len)
1394 {
1395 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1396
1397 inferior_ptid = ptid;
1398 write_memory (memaddr, myaddr, len);
1399 do_cleanups (ptid_cleanup);
1400 }
1401
1402 /* Restore the contents of the copy area for thread PTID. */
1403
1404 static void
1405 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1406 ptid_t ptid)
1407 {
1408 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1409
1410 write_memory_ptid (ptid, displaced->step_copy,
1411 displaced->step_saved_copy, len);
1412 if (debug_displaced)
1413 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1414 target_pid_to_str (ptid),
1415 paddress (displaced->step_gdbarch,
1416 displaced->step_copy));
1417 }
1418
1419 static void
1420 displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
1421 {
1422 struct cleanup *old_cleanups;
1423 struct displaced_step_inferior_state *displaced
1424 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1425
1426 /* Was any thread of this process doing a displaced step? */
1427 if (displaced == NULL)
1428 return;
1429
1430 /* Was this event for the pid we displaced? */
1431 if (ptid_equal (displaced->step_ptid, null_ptid)
1432 || ! ptid_equal (displaced->step_ptid, event_ptid))
1433 return;
1434
1435 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1436
1437 displaced_step_restore (displaced, displaced->step_ptid);
1438
1439 /* Did the instruction complete successfully? */
1440 if (signal == GDB_SIGNAL_TRAP)
1441 {
1442 /* Fix up the resulting state. */
1443 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1444 displaced->step_closure,
1445 displaced->step_original,
1446 displaced->step_copy,
1447 get_thread_regcache (displaced->step_ptid));
1448 }
1449 else
1450 {
1451 /* Since the instruction didn't complete, all we can do is
1452 relocate the PC. */
1453 struct regcache *regcache = get_thread_regcache (event_ptid);
1454 CORE_ADDR pc = regcache_read_pc (regcache);
1455
1456 pc = displaced->step_original + (pc - displaced->step_copy);
1457 regcache_write_pc (regcache, pc);
1458 }
1459
1460 do_cleanups (old_cleanups);
1461
1462 displaced->step_ptid = null_ptid;
1463
1464 /* Are there any pending displaced stepping requests? If so, run
1465 one now. Leave the state object around, since we're likely to
1466 need it again soon. */
1467 while (displaced->step_request_queue)
1468 {
1469 struct displaced_step_request *head;
1470 ptid_t ptid;
1471 struct regcache *regcache;
1472 struct gdbarch *gdbarch;
1473 CORE_ADDR actual_pc;
1474 struct address_space *aspace;
1475
1476 head = displaced->step_request_queue;
1477 ptid = head->ptid;
1478 displaced->step_request_queue = head->next;
1479 xfree (head);
1480
1481 context_switch (ptid);
1482
1483 regcache = get_thread_regcache (ptid);
1484 actual_pc = regcache_read_pc (regcache);
1485 aspace = get_regcache_aspace (regcache);
1486
1487 if (breakpoint_here_p (aspace, actual_pc))
1488 {
1489 if (debug_displaced)
1490 fprintf_unfiltered (gdb_stdlog,
1491 "displaced: stepping queued %s now\n",
1492 target_pid_to_str (ptid));
1493
1494 displaced_step_prepare (ptid);
1495
1496 gdbarch = get_regcache_arch (regcache);
1497
1498 if (debug_displaced)
1499 {
1500 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1501 gdb_byte buf[4];
1502
1503 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1504 paddress (gdbarch, actual_pc));
1505 read_memory (actual_pc, buf, sizeof (buf));
1506 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1507 }
1508
1509 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1510 displaced->step_closure))
1511 target_resume (ptid, 1, GDB_SIGNAL_0);
1512 else
1513 target_resume (ptid, 0, GDB_SIGNAL_0);
1514
1515 /* Done, we're stepping a thread. */
1516 break;
1517 }
1518 else
1519 {
1520 int step;
1521 struct thread_info *tp = inferior_thread ();
1522
1523 /* The breakpoint we were sitting under has since been
1524 removed. */
1525 tp->control.trap_expected = 0;
1526
1527 /* Go back to what we were trying to do. */
1528 step = currently_stepping (tp);
1529
1530 if (debug_displaced)
1531 fprintf_unfiltered (gdb_stdlog,
1532 "displaced: breakpoint is gone: %s, step(%d)\n",
1533 target_pid_to_str (tp->ptid), step);
1534
1535 target_resume (ptid, step, GDB_SIGNAL_0);
1536 tp->suspend.stop_signal = GDB_SIGNAL_0;
1537
1538 /* This request was discarded. See if there's any other
1539 thread waiting for its turn. */
1540 }
1541 }
1542 }
1543
1544 /* Update global variables holding ptids to hold NEW_PTID if they were
1545 holding OLD_PTID. */
1546 static void
1547 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1548 {
1549 struct displaced_step_request *it;
1550 struct displaced_step_inferior_state *displaced;
1551
1552 if (ptid_equal (inferior_ptid, old_ptid))
1553 inferior_ptid = new_ptid;
1554
1555 if (ptid_equal (singlestep_ptid, old_ptid))
1556 singlestep_ptid = new_ptid;
1557
1558 if (ptid_equal (deferred_step_ptid, old_ptid))
1559 deferred_step_ptid = new_ptid;
1560
1561 for (displaced = displaced_step_inferior_states;
1562 displaced;
1563 displaced = displaced->next)
1564 {
1565 if (ptid_equal (displaced->step_ptid, old_ptid))
1566 displaced->step_ptid = new_ptid;
1567
1568 for (it = displaced->step_request_queue; it; it = it->next)
1569 if (ptid_equal (it->ptid, old_ptid))
1570 it->ptid = new_ptid;
1571 }
1572 }
1573
1574 \f
1575 /* Resuming. */
1576
1577 /* Things to clean up if we QUIT out of resume (). */
1578 static void
1579 resume_cleanups (void *ignore)
1580 {
1581 normal_stop ();
1582 }
1583
1584 static const char schedlock_off[] = "off";
1585 static const char schedlock_on[] = "on";
1586 static const char schedlock_step[] = "step";
1587 static const char *const scheduler_enums[] = {
1588 schedlock_off,
1589 schedlock_on,
1590 schedlock_step,
1591 NULL
1592 };
1593 static const char *scheduler_mode = schedlock_off;
1594 static void
1595 show_scheduler_mode (struct ui_file *file, int from_tty,
1596 struct cmd_list_element *c, const char *value)
1597 {
1598 fprintf_filtered (file,
1599 _("Mode for locking scheduler "
1600 "during execution is \"%s\".\n"),
1601 value);
1602 }
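/* Usage sketch for the scheduler-locking modes declared above:

     (gdb) set scheduler-locking off   -- all threads run freely
     (gdb) set scheduler-locking on    -- only the current thread runs
     (gdb) set scheduler-locking step  -- lock threads only while
                                          stepping  */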
1603
1604 static void
1605 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1606 {
1607 if (!target_can_lock_scheduler)
1608 {
1609 scheduler_mode = schedlock_off;
1610 error (_("Target '%s' cannot support this command."), target_shortname);
1611 }
1612 }
1613
1614 /* True if execution commands resume all threads of all processes by
1615 default; otherwise, resume only threads of the current inferior
1616 process. */
1617 int sched_multi = 0;
1618
1619 /* Try to set up for software single stepping over the specified location.
1620 Return 1 if target_resume() should use hardware single step.
1621
1622 GDBARCH the current gdbarch.
1623 PC the location to step over. */
1624
1625 static int
1626 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1627 {
1628 int hw_step = 1;
1629
1630 if (execution_direction == EXEC_FORWARD
1631 && gdbarch_software_single_step_p (gdbarch)
1632 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1633 {
1634 hw_step = 0;
1635 /* Do not pull these breakpoints until after a `wait' in
1636 `wait_for_inferior'. */
1637 singlestep_breakpoints_inserted_p = 1;
1638 singlestep_ptid = inferior_ptid;
1639 singlestep_pc = pc;
1640 }
1641 return hw_step;
1642 }
1643
1644 /* Return a ptid representing the set of threads that we will proceed,
1645 from the perspective of the user/frontend. We may actually resume
1646 fewer threads at first, e.g., if a thread is stopped at a
1647 breakpoint that needs stepping-off, but that should not be visible
1648 to the user/frontend, and neither should the frontend/user be
1649 allowed to proceed any of the threads that happen to be stopped for
1650 internal run control handling, if a previous command wanted them
1651 resumed. */
1652
1653 ptid_t
1654 user_visible_resume_ptid (int step)
1655 {
1656 /* By default, resume all threads of all processes. */
1657 ptid_t resume_ptid = RESUME_ALL;
1658
1659 /* Maybe resume only all threads of the current process. */
1660 if (!sched_multi && target_supports_multi_process ())
1661 {
1662 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1663 }
1664
1665 /* Maybe resume a single thread after all. */
1666 if (non_stop)
1667 {
1668 /* With non-stop mode on, threads are always handled
1669 individually. */
1670 resume_ptid = inferior_ptid;
1671 }
1672 else if ((scheduler_mode == schedlock_on)
1673 || (scheduler_mode == schedlock_step
1674 && (step || singlestep_breakpoints_inserted_p)))
1675 {
1676 /* User-settable 'scheduler' mode requires solo thread resume. */
1677 resume_ptid = inferior_ptid;
1678 }
1679
1680 return resume_ptid;
1681 }
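/* Summarizing the function above, the returned set is roughly, in
   order of precedence:

     non-stop mode                      -> inferior_ptid (one thread)
     scheduler-locking on               -> inferior_ptid
     scheduler-locking step + stepping  -> inferior_ptid
     "set schedule-multiple on"         -> RESUME_ALL (all processes)
     otherwise                          -> all threads of the current
                                           process  */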
1682
1683 /* Resume the inferior, but allow a QUIT. This is useful if the user
1684 wants to interrupt some lengthy single-stepping operation
1685 (for child processes, the SIGINT goes to the inferior, and so
1686 we get a SIGINT random_signal, but for remote debugging and perhaps
1687 other targets, that's not true).
1688
1689 STEP nonzero if we should step (zero to continue instead).
1690 SIG is the signal to give the inferior (zero for none). */
1691 void
1692 resume (int step, enum gdb_signal sig)
1693 {
1694 int should_resume = 1;
1695 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1696 struct regcache *regcache = get_current_regcache ();
1697 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1698 struct thread_info *tp = inferior_thread ();
1699 CORE_ADDR pc = regcache_read_pc (regcache);
1700 struct address_space *aspace = get_regcache_aspace (regcache);
1701
1702 QUIT;
1703
1704 if (current_inferior ()->waiting_for_vfork_done)
1705 {
1706 /* Don't try to single-step a vfork parent that is waiting for
1707 the child to get out of the shared memory region (by exec'ing
1708 or exiting). This is particularly important on software
1709 single-step archs, as the child process would trip on the
1710 software single step breakpoint inserted for the parent
1711 process. Since the parent will not actually execute any
1712 instruction until the child is out of the shared region (such
1713 are vfork's semantics), it is safe to simply continue it.
1714 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1715 the parent, and tell it to `keep_going', which automatically
1716 sets it stepping again. */
1717 if (debug_infrun)
1718 fprintf_unfiltered (gdb_stdlog,
1719 "infrun: resume : clear step\n");
1720 step = 0;
1721 }
1722
1723 if (debug_infrun)
1724 fprintf_unfiltered (gdb_stdlog,
1725 "infrun: resume (step=%d, signal=%d), "
1726 "trap_expected=%d, current thread [%s] at %s\n",
1727 step, sig, tp->control.trap_expected,
1728 target_pid_to_str (inferior_ptid),
1729 paddress (gdbarch, pc));
1730
1731 /* Normally, by the time we reach `resume', the breakpoints are either
1732 removed or inserted, as appropriate. The exception is if we're sitting
1733 at a permanent breakpoint; we need to step over it, but permanent
1734 breakpoints can't be removed. So we have to test for it here. */
1735 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1736 {
1737 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1738 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1739 else
1740 error (_("\
1741 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1742 how to step past a permanent breakpoint on this architecture. Try using\n\
1743 a command like `return' or `jump' to continue execution."));
1744 }
1745
1746 /* If we have a breakpoint to step over, make sure to do a single
1747 step only. Same if we have software watchpoints. */
1748 if (tp->control.trap_expected || bpstat_should_step ())
1749 tp->control.may_range_step = 0;
1750
1751 /* If enabled, step over breakpoints by executing a copy of the
1752 instruction at a different address.
1753
1754 We can't use displaced stepping when we have a signal to deliver;
1755 the comments for displaced_step_prepare explain why. The
1756 comments in the handle_inferior event for dealing with 'random
1757 signals' explain what we do instead.
1758
1759 We can't use displaced stepping while we are waiting for a vfork_done
1760 event either; displaced stepping would break the vfork child in the
1761 same way a software single-step breakpoint would. */
1762 if (use_displaced_stepping (gdbarch)
1763 && (tp->control.trap_expected
1764 || (step && gdbarch_software_single_step_p (gdbarch)))
1765 && sig == GDB_SIGNAL_0
1766 && !current_inferior ()->waiting_for_vfork_done)
1767 {
1768 struct displaced_step_inferior_state *displaced;
1769
1770 if (!displaced_step_prepare (inferior_ptid))
1771 {
1772 /* Got placed in the displaced stepping queue. Will be resumed
1773 later when all the currently queued displaced stepping
1774 requests finish. The thread is not executing at this point,
1775 and the call to set_executing will be made later. But we
1776 need to call set_running here, since from the frontend's point
1777 of view, the thread is running. */
1778 set_running (inferior_ptid, 1);
1779 discard_cleanups (old_cleanups);
1780 return;
1781 }
1782
1783 /* Update pc to reflect the new address from which we will execute
1784 instructions due to displaced stepping. */
1785 pc = regcache_read_pc (get_thread_regcache (inferior_ptid));
1786
1787 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1788 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1789 displaced->step_closure);
1790 }
1791
1792 /* Do we need to do it the hard way, w/temp breakpoints? */
1793 else if (step)
1794 step = maybe_software_singlestep (gdbarch, pc);
1795
1796 /* Currently, our software single-step implementation leads to different
1797 results than hardware single-stepping in one situation: when stepping
1798 into delivering a signal which has an associated signal handler,
1799 hardware single-step will stop at the first instruction of the handler,
1800 while software single-step will simply skip execution of the handler.
1801
1802 For now, this difference in behavior is accepted since there is no
1803 easy way to actually implement single-stepping into a signal handler
1804 without kernel support.
1805
1806 However, there is one scenario where this difference leads to follow-on
1807 problems: if we're stepping off a breakpoint by removing all breakpoints
1808 and then single-stepping. In this case, the software single-step
1809 behavior means that even if there is a *breakpoint* in the signal
1810 handler, GDB still would not stop.
1811
1812 Fortunately, we can at least fix this particular issue. We detect
1813 here the case where we are about to deliver a signal while software
1814 single-stepping with breakpoints removed. In this situation, we
1815 revert the decisions to remove all breakpoints and insert single-
1816 step breakpoints, and instead we install a step-resume breakpoint
1817 at the current address, deliver the signal without stepping, and
1818 once we arrive back at the step-resume breakpoint, actually step
1819 over the breakpoint we originally wanted to step over. */
1820 if (singlestep_breakpoints_inserted_p
1821 && tp->control.trap_expected && sig != GDB_SIGNAL_0)
1822 {
1823 /* If we have nested signals or a pending signal is delivered
1824 immediately after a handler returns, we might already have
1825 a step-resume breakpoint set on the earlier handler. We cannot
1826 set another step-resume breakpoint; just continue on until the
1827 original breakpoint is hit. */
1828 if (tp->control.step_resume_breakpoint == NULL)
1829 {
1830 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1831 tp->step_after_step_resume_breakpoint = 1;
1832 }
1833
1834 remove_single_step_breakpoints ();
1835 singlestep_breakpoints_inserted_p = 0;
1836
1837 insert_breakpoints ();
1838 tp->control.trap_expected = 0;
1839 }
1840
1841 if (should_resume)
1842 {
1843 ptid_t resume_ptid;
1844
1845 /* If STEP is set, it's a request to use hardware stepping
1846 facilities. But in that case, we should never
1847 use singlestep breakpoints. */
1848 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1849
1850 /* Decide the set of threads to ask the target to resume. Start
1851 by assuming everything will be resumed, then narrow the set
1852 by applying increasingly restrictive conditions. */
1853 resume_ptid = user_visible_resume_ptid (step);
1854
1855 /* Maybe resume a single thread after all. */
1856 if (singlestep_breakpoints_inserted_p
1857 && stepping_past_singlestep_breakpoint)
1858 {
1859 /* The situation here is as follows. In thread T1 we wanted to
1860 single-step. Lacking hardware single-stepping, we've set a
1861 breakpoint at the PC of the next instruction -- call it
1862 P. After resuming, we've hit that breakpoint in thread T2.
1863 Now we've removed the original breakpoint, inserted a breakpoint
1864 at P+1, and try to step to advance T2 past the breakpoint.
1865 We need to step only T2, because if T1 is allowed to run freely,
1866 it can run past P, and if other threads are allowed to run,
1867 they can hit the breakpoint at P+1, and nested hits of single-step
1868 breakpoints are not something we'd want -- that's complicated
1869 to support, and has no value. */
1870 resume_ptid = inferior_ptid;
1871 }
1872 else if ((step || singlestep_breakpoints_inserted_p)
1873 && tp->control.trap_expected)
1874 {
1875 /* We're allowing a thread to run past a breakpoint it has
1876 hit, by single-stepping the thread with the breakpoint
1877 removed. In which case, we need to single-step only this
1878 thread, and keep others stopped, as they can miss this
1879 breakpoint if allowed to run.
1880
1881 The current code actually removes all breakpoints when
1882 doing this, not just the one being stepped over, so if we
1883 let other threads run, we can actually miss any
1884 breakpoint, not just the one at PC. */
1885 resume_ptid = inferior_ptid;
1886 }
1887
1888 if (gdbarch_cannot_step_breakpoint (gdbarch))
1889 {
1890 /* Most targets can step a breakpoint instruction, thus
1891 executing it normally. But if this one cannot, just
1892 continue and we will hit it anyway. */
1893 if (step && breakpoint_inserted_here_p (aspace, pc))
1894 step = 0;
1895 }
1896
1897 if (debug_displaced
1898 && use_displaced_stepping (gdbarch)
1899 && tp->control.trap_expected)
1900 {
1901 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1902 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1903 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1904 gdb_byte buf[4];
1905
1906 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1907 paddress (resume_gdbarch, actual_pc));
1908 read_memory (actual_pc, buf, sizeof (buf));
1909 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1910 }
1911
1912 if (tp->control.may_range_step)
1913 {
1914 /* If we're resuming a thread with the PC out of the step
1915 range, then we're doing some nested/finer run control
1916 operation, like stepping the thread out of the dynamic
1917 linker or the displaced stepping scratch pad. We
1918 shouldn't have allowed a range step then. */
1919 gdb_assert (pc_in_thread_step_range (pc, tp));
1920 }
1921
1922 /* Install inferior's terminal modes. */
1923 target_terminal_inferior ();
1924
1925 /* Avoid confusing the next resume, if the next stop/resume
1926 happens to apply to another thread. */
1927 tp->suspend.stop_signal = GDB_SIGNAL_0;
1928
1929 /* Advise target which signals may be handled silently. If we have
1930 removed breakpoints because we are stepping over one (which can
1931 happen only if we are not using displaced stepping), we need to
1932 receive all signals to avoid accidentally skipping a breakpoint
1933 during execution of a signal handler. */
1934 if ((step || singlestep_breakpoints_inserted_p)
1935 && tp->control.trap_expected
1936 && !use_displaced_stepping (gdbarch))
1937 target_pass_signals (0, NULL);
1938 else
1939 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
1940
1941 target_resume (resume_ptid, step, sig);
1942 }
1943
1944 discard_cleanups (old_cleanups);
1945 }
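
/* A minimal sketch of how this is driven (taken from proceed below,
   not a new interface): the caller first clears and then fills in the
   per-thread control state, and finally calls

     resume (force_step || step || bpstat_should_step (),
             tp->suspend.stop_signal);

   so the STEP argument already folds in any internally forced
   single-step, and SIG is the per-thread stop signal to deliver.  */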
1946 \f
1947 /* Proceeding. */
1948
1949 /* Clear out all variables saying what to do when inferior is continued.
1950 First do this, then set the ones you want, then call `proceed'. */
1951
1952 static void
1953 clear_proceed_status_thread (struct thread_info *tp)
1954 {
1955 if (debug_infrun)
1956 fprintf_unfiltered (gdb_stdlog,
1957 "infrun: clear_proceed_status_thread (%s)\n",
1958 target_pid_to_str (tp->ptid));
1959
1960 tp->control.trap_expected = 0;
1961 tp->control.step_range_start = 0;
1962 tp->control.step_range_end = 0;
1963 tp->control.may_range_step = 0;
1964 tp->control.step_frame_id = null_frame_id;
1965 tp->control.step_stack_frame_id = null_frame_id;
1966 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
1967 tp->stop_requested = 0;
1968
1969 tp->control.stop_step = 0;
1970
1971 tp->control.proceed_to_finish = 0;
1972
1973 /* Discard any remaining commands or status from previous stop. */
1974 bpstat_clear (&tp->control.stop_bpstat);
1975 }
1976
1977 static int
1978 clear_proceed_status_callback (struct thread_info *tp, void *data)
1979 {
1980 if (is_exited (tp->ptid))
1981 return 0;
1982
1983 clear_proceed_status_thread (tp);
1984 return 0;
1985 }
1986
1987 void
1988 clear_proceed_status (void)
1989 {
1990 if (!non_stop)
1991 {
1992 /* In all-stop mode, delete the per-thread status of all
1993 threads, even if inferior_ptid is null_ptid, there may be
1994 threads on the list. E.g., we may be launching a new
1995 process, while selecting the executable. */
1996 iterate_over_threads (clear_proceed_status_callback, NULL);
1997 }
1998
1999 if (!ptid_equal (inferior_ptid, null_ptid))
2000 {
2001 struct inferior *inferior;
2002
2003 if (non_stop)
2004 {
2005 /* If in non-stop mode, only delete the per-thread status of
2006 the current thread. */
2007 clear_proceed_status_thread (inferior_thread ());
2008 }
2009
2010 inferior = current_inferior ();
2011 inferior->control.stop_soon = NO_STOP_QUIETLY;
2012 }
2013
2014 stop_after_trap = 0;
2015
2016 observer_notify_about_to_proceed ();
2017
2018 if (stop_registers)
2019 {
2020 regcache_xfree (stop_registers);
2021 stop_registers = NULL;
2022 }
2023 }
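
/* A minimal usage sketch of the recipe above ("First do this, then
   set the ones you want, then call `proceed'"); a plain "continue"
   amounts to roughly

     clear_proceed_status ();
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT, 0);

   i.e. resume where stopped, keep whatever signal the thread stopped
   with (subject to "handle"), and do not single-step.  */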
2024
2025 /* Check the current thread against the thread that reported the most recent
2026 event. If a step-over is required, return TRUE and set the current thread
2027 to the old thread. Otherwise return FALSE.
2028
2029 This should be suitable for any targets that support threads. */
2030
2031 static int
2032 prepare_to_proceed (int step)
2033 {
2034 ptid_t wait_ptid;
2035 struct target_waitstatus wait_status;
2036 int schedlock_enabled;
2037
2038 /* With non-stop mode on, threads are always handled individually. */
2039 gdb_assert (! non_stop);
2040
2041 /* Get the last target status returned by target_wait(). */
2042 get_last_target_status (&wait_ptid, &wait_status);
2043
2044 /* Make sure we were stopped at a breakpoint. */
2045 if (wait_status.kind != TARGET_WAITKIND_STOPPED
2046 || (wait_status.value.sig != GDB_SIGNAL_TRAP
2047 && wait_status.value.sig != GDB_SIGNAL_ILL
2048 && wait_status.value.sig != GDB_SIGNAL_SEGV
2049 && wait_status.value.sig != GDB_SIGNAL_EMT))
2050 {
2051 return 0;
2052 }
2053
2054 schedlock_enabled = (scheduler_mode == schedlock_on
2055 || (scheduler_mode == schedlock_step
2056 && step));
2057
2058 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
2059 if (schedlock_enabled)
2060 return 0;
2061
2062 /* Don't switch over if we're about to resume a process other
2063 than WAIT_PTID's, and schedule-multiple is off. */
2064 if (!sched_multi
2065 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2066 return 0;
2067
2068 /* Switched over from WAIT_PTID. */
2069 if (!ptid_equal (wait_ptid, minus_one_ptid)
2070 && !ptid_equal (inferior_ptid, wait_ptid))
2071 {
2072 struct regcache *regcache = get_thread_regcache (wait_ptid);
2073
2074 if (breakpoint_here_p (get_regcache_aspace (regcache),
2075 regcache_read_pc (regcache)))
2076 {
2077 /* If stepping, remember current thread to switch back to. */
2078 if (step)
2079 deferred_step_ptid = inferior_ptid;
2080
2081 /* Switch back to WAIT_PTID thread. */
2082 switch_to_thread (wait_ptid);
2083
2084 if (debug_infrun)
2085 fprintf_unfiltered (gdb_stdlog,
2086 "infrun: prepare_to_proceed (step=%d), "
2087 "switched to [%s]\n",
2088 step, target_pid_to_str (inferior_ptid));
2089
2090 /* We return 1 to indicate that there is a breakpoint here,
2091 so we need to step over it before continuing to avoid
2092 hitting it straight away. */
2093 return 1;
2094 }
2095 }
2096
2097 return 0;
2098 }
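
/* Example of the situation handled above, assuming two threads: the
   user resumes thread A, but the most recent target_wait () reported
   the stop for thread B, which is sitting on a breakpoint.  If we
   resumed with A still selected, B would re-trap at the same PC
   without executing anything.  Switching back to B (and remembering A
   in deferred_step_ptid when stepping) lets the caller step B off the
   breakpoint first.  */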
2099
2100 /* Basic routine for continuing the program in various fashions.
2101
2102 ADDR is the address to resume at, or -1 for resume where stopped.
2103 SIGGNAL is the signal to give it, or 0 for none,
2104 or -1 for act according to how it stopped.
2105 STEP is nonzero if we should trap after one instruction.
2106 -1 means return after that and print nothing.
2107 You should probably set various step_... variables
2108 before calling here, if you are stepping.
2109
2110 You should call clear_proceed_status before calling proceed. */
2111
2112 void
2113 proceed (CORE_ADDR addr, enum gdb_signal siggnal, int step)
2114 {
2115 struct regcache *regcache;
2116 struct gdbarch *gdbarch;
2117 struct thread_info *tp;
2118 CORE_ADDR pc;
2119 struct address_space *aspace;
2120 /* GDB may force the inferior to step for various reasons. */
2121 int force_step = 0;
2122
2123 /* If we're stopped at a fork/vfork, follow the branch set by the
2124 "set follow-fork-mode" command; otherwise, we'll just proceed
2125 resuming the current thread. */
2126 if (!follow_fork ())
2127 {
2128 /* The target for some reason decided not to resume. */
2129 normal_stop ();
2130 if (target_can_async_p ())
2131 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2132 return;
2133 }
2134
2135 /* We'll update this if & when we switch to a new thread. */
2136 previous_inferior_ptid = inferior_ptid;
2137
2138 regcache = get_current_regcache ();
2139 gdbarch = get_regcache_arch (regcache);
2140 aspace = get_regcache_aspace (regcache);
2141 pc = regcache_read_pc (regcache);
2142
2143 if (step > 0)
2144 step_start_function = find_pc_function (pc);
2145 if (step < 0)
2146 stop_after_trap = 1;
2147
2148 if (addr == (CORE_ADDR) -1)
2149 {
2150 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2151 && execution_direction != EXEC_REVERSE)
2152 /* There is a breakpoint at the address we will resume at,
2153 step one instruction before inserting breakpoints so that
2154 we do not stop right away (and report a second hit at this
2155 breakpoint).
2156
2157 Note, we don't do this in reverse, because we won't
2158 actually be executing the breakpoint insn anyway.
2159 We'll be (un-)executing the previous instruction. */
2160
2161 force_step = 1;
2162 else if (gdbarch_single_step_through_delay_p (gdbarch)
2163 && gdbarch_single_step_through_delay (gdbarch,
2164 get_current_frame ()))
2165 /* We stepped onto an instruction that needs to be stepped
2166 again before re-inserting the breakpoint, do so. */
2167 force_step = 1;
2168 }
2169 else
2170 {
2171 regcache_write_pc (regcache, addr);
2172 }
2173
2174 if (debug_infrun)
2175 fprintf_unfiltered (gdb_stdlog,
2176 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
2177 paddress (gdbarch, addr), siggnal, step);
2178
2179 if (non_stop)
2180 /* In non-stop, each thread is handled individually. The context
2181 must already be set to the right thread here. */
2182 ;
2183 else
2184 {
2185 /* In a multi-threaded task we may select another thread and
2186 then continue or step.
2187
2188 But if the old thread was stopped at a breakpoint, it will
2189 immediately cause another breakpoint stop without any
2190 execution (i.e. it will report a breakpoint hit incorrectly).
2191 So we must step over it first.
2192
2193 prepare_to_proceed checks the current thread against the
2194 thread that reported the most recent event. If a step-over
2195 is required it returns TRUE and sets the current thread to
2196 the old thread. */
2197 if (prepare_to_proceed (step))
2198 force_step = 1;
2199 }
2200
2201 /* prepare_to_proceed may change the current thread. */
2202 tp = inferior_thread ();
2203
2204 if (force_step)
2205 {
2206 tp->control.trap_expected = 1;
2207 /* If displaced stepping is enabled, we can step over the
2208 breakpoint without hitting it, so leave all breakpoints
2209 inserted. Otherwise we need to disable all breakpoints, step
2210 one instruction, and then re-add them when that step is
2211 finished. */
2212 if (!use_displaced_stepping (gdbarch))
2213 remove_breakpoints ();
2214 }
2215
2216 /* We can insert breakpoints if we're not trying to step over one,
2217 or if we are stepping over one but we're using displaced stepping
2218 to do so. */
2219 if (! tp->control.trap_expected || use_displaced_stepping (gdbarch))
2220 insert_breakpoints ();
2221
2222 if (!non_stop)
2223 {
2224 /* Pass the last stop signal to the thread we're resuming,
2225 irrespective of whether the current thread is the thread that
2226 got the last event or not. This was historically GDB's
2227 behaviour before keeping a stop_signal per thread. */
2228
2229 struct thread_info *last_thread;
2230 ptid_t last_ptid;
2231 struct target_waitstatus last_status;
2232
2233 get_last_target_status (&last_ptid, &last_status);
2234 if (!ptid_equal (inferior_ptid, last_ptid)
2235 && !ptid_equal (last_ptid, null_ptid)
2236 && !ptid_equal (last_ptid, minus_one_ptid))
2237 {
2238 last_thread = find_thread_ptid (last_ptid);
2239 if (last_thread)
2240 {
2241 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2242 last_thread->suspend.stop_signal = GDB_SIGNAL_0;
2243 }
2244 }
2245 }
2246
2247 if (siggnal != GDB_SIGNAL_DEFAULT)
2248 tp->suspend.stop_signal = siggnal;
2249 /* If this signal should not be seen by program,
2250 give it zero. Used for debugging signals. */
2251 else if (!signal_program[tp->suspend.stop_signal])
2252 tp->suspend.stop_signal = GDB_SIGNAL_0;
2253
2254 annotate_starting ();
2255
2256 /* Make sure that output from GDB appears before output from the
2257 inferior. */
2258 gdb_flush (gdb_stdout);
2259
2260 /* Refresh prev_pc value just prior to resuming. This used to be
2261 done in stop_stepping, however, setting prev_pc there did not handle
2262 scenarios such as inferior function calls or returning from
2263 a function via the return command. In those cases, the prev_pc
2264 value was not set properly for subsequent commands. The prev_pc value
2265 is used to initialize the starting line number in the ecs. With an
2266 invalid value, the gdb next command ends up stopping at the position
2267 represented by the next line table entry past our start position.
2268 On platforms that generate one line table entry per line, this
2269 is not a problem. However, on the ia64, the compiler generates
2270 extraneous line table entries that do not increase the line number.
2271 When we issue the gdb next command on the ia64 after an inferior call
2272 or a return command, we often end up a few instructions forward, still
2273 within the original line we started.
2274
2275 An attempt was made to refresh the prev_pc at the same time the
2276 execution_control_state is initialized (for instance, just before
2277 waiting for an inferior event). But this approach did not work
2278 because of platforms that use ptrace, where the pc register cannot
2279 be read unless the inferior is stopped. At that point, we are not
2280 guaranteed the inferior is stopped and so the regcache_read_pc() call
2281 can fail. Setting the prev_pc value here ensures the value is updated
2282 correctly when the inferior is stopped. */
2283 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2284
2285 /* Fill in with reasonable starting values. */
2286 init_thread_stepping_state (tp);
2287
2288 /* Reset to normal state. */
2289 init_infwait_state ();
2290
2291 /* Resume inferior. */
2292 resume (force_step || step || bpstat_should_step (),
2293 tp->suspend.stop_signal);
2294
2295 /* Wait for it to stop (if not standalone)
2296 and in any case decode why it stopped, and act accordingly. */
2297 /* Do this only if we are not using the event loop, or if the target
2298 does not support asynchronous execution. */
2299 if (!target_can_async_p ())
2300 {
2301 wait_for_inferior ();
2302 normal_stop ();
2303 }
2304 }
2305 \f
2306
2307 /* Start remote-debugging of a machine over a serial link. */
2308
2309 void
2310 start_remote (int from_tty)
2311 {
2312 struct inferior *inferior;
2313
2314 inferior = current_inferior ();
2315 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2316
2317 /* Always go on waiting for the target, regardless of the mode. */
2318 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2319 indicate to wait_for_inferior that a target should timeout if
2320 nothing is returned (instead of just blocking). Because of this,
2321 targets expecting an immediate response need to, internally, set
2322 things up so that the target_wait() is forced to eventually
2323 timeout. */
2324 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2325 differentiate to its caller what the state of the target is after
2326 the initial open has been performed. Here we're assuming that
2327 the target has stopped. It should be possible to eventually have
2328 target_open() return to the caller an indication that the target
2329 is currently running and GDB state should be set to the same as
2330 for an async run. */
2331 wait_for_inferior ();
2332
2333 /* Now that the inferior has stopped, do any bookkeeping like
2334 loading shared libraries. We want to do this before normal_stop,
2335 so that the displayed frame is up to date. */
2336 post_create_inferior (&current_target, from_tty);
2337
2338 normal_stop ();
2339 }
2340
2341 /* Initialize static vars when a new inferior begins. */
2342
2343 void
2344 init_wait_for_inferior (void)
2345 {
2346 /* These are meaningless until the first time through wait_for_inferior. */
2347
2348 breakpoint_init_inferior (inf_starting);
2349
2350 clear_proceed_status ();
2351
2352 stepping_past_singlestep_breakpoint = 0;
2353 deferred_step_ptid = null_ptid;
2354
2355 target_last_wait_ptid = minus_one_ptid;
2356
2357 previous_inferior_ptid = inferior_ptid;
2358 init_infwait_state ();
2359
2360 /* Discard any skipped inlined frames. */
2361 clear_inline_frame_state (minus_one_ptid);
2362 }
2363
2364 \f
2365 /* This enum encodes possible reasons for doing a target_wait, so that
2366 wfi can call target_wait in one place. (Ultimately the call will be
2367 moved out of the infinite loop entirely.) */
2368
2369 enum infwait_states
2370 {
2371 infwait_normal_state,
2372 infwait_thread_hop_state,
2373 infwait_step_watch_state,
2374 infwait_nonstep_watch_state
2375 };
2376
2377 /* The PTID we'll do a target_wait on. */
2378 ptid_t waiton_ptid;
2379
2380 /* Current inferior wait state. */
2381 static enum infwait_states infwait_state;
2382
2383 /* Data to be passed around while handling an event. This data is
2384 discarded between events. */
2385 struct execution_control_state
2386 {
2387 ptid_t ptid;
2388 /* The thread that got the event, if this was a thread event; NULL
2389 otherwise. */
2390 struct thread_info *event_thread;
2391
2392 struct target_waitstatus ws;
2393 int random_signal;
2394 int stop_func_filled_in;
2395 CORE_ADDR stop_func_start;
2396 CORE_ADDR stop_func_end;
2397 const char *stop_func_name;
2398 int wait_some_more;
2399 };
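
/* A rough sketch of how one of these is used, mirroring
   wait_for_inferior below (illustration only):

     struct execution_control_state ecss;
     struct execution_control_state *ecs = &ecss;

     memset (ecs, 0, sizeof (*ecs));
     ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
     handle_inferior_event (ecs);
     if (!ecs->wait_some_more)
       normal_stop ();

   Nothing in the structure survives past the handling of a single
   event.  */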
2400
2401 static void handle_inferior_event (struct execution_control_state *ecs);
2402
2403 static void handle_step_into_function (struct gdbarch *gdbarch,
2404 struct execution_control_state *ecs);
2405 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2406 struct execution_control_state *ecs);
2407 static void check_exception_resume (struct execution_control_state *,
2408 struct frame_info *);
2409
2410 static void stop_stepping (struct execution_control_state *ecs);
2411 static void prepare_to_wait (struct execution_control_state *ecs);
2412 static void keep_going (struct execution_control_state *ecs);
2413
2414 /* Callback for iterate_over_threads. If the thread is stopped, but
2415 the user/frontend doesn't know about that yet, go through
2416 normal_stop, as if the thread had just stopped now. ARG points at
2417 a ptid. If PTID is MINUS_ONE_PTID, this applies to all threads. If
2418 ptid_is_pid(PTID) is true, it applies to all threads of the process
2419 pointed at by PTID. Otherwise, it applies only to the thread
2420 pointed at by PTID. */
2421
2422 static int
2423 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2424 {
2425 ptid_t ptid = * (ptid_t *) arg;
2426
2427 if ((ptid_equal (info->ptid, ptid)
2428 || ptid_equal (minus_one_ptid, ptid)
2429 || (ptid_is_pid (ptid)
2430 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2431 && is_running (info->ptid)
2432 && !is_executing (info->ptid))
2433 {
2434 struct cleanup *old_chain;
2435 struct execution_control_state ecss;
2436 struct execution_control_state *ecs = &ecss;
2437
2438 memset (ecs, 0, sizeof (*ecs));
2439
2440 old_chain = make_cleanup_restore_current_thread ();
2441
2442 /* Go through handle_inferior_event/normal_stop, so we always
2443 have consistent output as if the stop event had been
2444 reported. */
2445 ecs->ptid = info->ptid;
2446 ecs->event_thread = find_thread_ptid (info->ptid);
2447 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2448 ecs->ws.value.sig = GDB_SIGNAL_0;
2449
2450 handle_inferior_event (ecs);
2451
2452 if (!ecs->wait_some_more)
2453 {
2454 struct thread_info *tp;
2455
2456 normal_stop ();
2457
2458 /* Finish off the continuations. */
2459 tp = inferior_thread ();
2460 do_all_intermediate_continuations_thread (tp, 1);
2461 do_all_continuations_thread (tp, 1);
2462 }
2463
2464 do_cleanups (old_chain);
2465 }
2466
2467 return 0;
2468 }
2469
2470 /* This function is attached as a "thread_stop_requested" observer.
2471 Cleanup local state that assumed the PTID was to be resumed, and
2472 report the stop to the frontend. */
2473
2474 static void
2475 infrun_thread_stop_requested (ptid_t ptid)
2476 {
2477 struct displaced_step_inferior_state *displaced;
2478
2479 /* PTID was requested to stop. Remove it from the displaced
2480 stepping queue, so we don't try to resume it automatically. */
2481
2482 for (displaced = displaced_step_inferior_states;
2483 displaced;
2484 displaced = displaced->next)
2485 {
2486 struct displaced_step_request *it, **prev_next_p;
2487
2488 it = displaced->step_request_queue;
2489 prev_next_p = &displaced->step_request_queue;
2490 while (it)
2491 {
2492 if (ptid_match (it->ptid, ptid))
2493 {
2494 *prev_next_p = it->next;
2495 it->next = NULL;
2496 xfree (it);
2497 }
2498 else
2499 {
2500 prev_next_p = &it->next;
2501 }
2502
2503 it = *prev_next_p;
2504 }
2505 }
2506
2507 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2508 }
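
/* Note on the unlink loop above: PREV_NEXT_P always points at the
   "next" slot that currently leads to IT (first
   &displaced->step_request_queue, then &it->next), so removing IT is
   just "*prev_next_p = it->next", and either way the walk continues
   from *prev_next_p.  */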
2509
2510 static void
2511 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2512 {
2513 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2514 nullify_last_target_wait_ptid ();
2515 }
2516
2517 /* Callback for iterate_over_threads. */
2518
2519 static int
2520 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2521 {
2522 if (is_exited (info->ptid))
2523 return 0;
2524
2525 delete_step_resume_breakpoint (info);
2526 delete_exception_resume_breakpoint (info);
2527 return 0;
2528 }
2529
2530 /* In all-stop, delete the step resume breakpoint of any thread that
2531 had one. In non-stop, delete the step resume breakpoint of the
2532 thread that just stopped. */
2533
2534 static void
2535 delete_step_thread_step_resume_breakpoint (void)
2536 {
2537 if (!target_has_execution
2538 || ptid_equal (inferior_ptid, null_ptid))
2539 /* If the inferior has exited, we have already deleted the step
2540 resume breakpoints out of GDB's lists. */
2541 return;
2542
2543 if (non_stop)
2544 {
2545 /* If in non-stop mode, only delete the step-resume or
2546 longjmp-resume breakpoint of the thread that just stopped
2547 stepping. */
2548 struct thread_info *tp = inferior_thread ();
2549
2550 delete_step_resume_breakpoint (tp);
2551 delete_exception_resume_breakpoint (tp);
2552 }
2553 else
2554 /* In all-stop mode, delete all step-resume and longjmp-resume
2555 breakpoints of any thread that had them. */
2556 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2557 }
2558
2559 /* A cleanup wrapper. */
2560
2561 static void
2562 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2563 {
2564 delete_step_thread_step_resume_breakpoint ();
2565 }
2566
2567 /* Pretty print the results of target_wait, for debugging purposes. */
2568
2569 static void
2570 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2571 const struct target_waitstatus *ws)
2572 {
2573 char *status_string = target_waitstatus_to_string (ws);
2574 struct ui_file *tmp_stream = mem_fileopen ();
2575 char *text;
2576
2577 /* The text is split over several lines because it was getting too long.
2578 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2579 output as a unit; we want only one timestamp printed if debug_timestamp
2580 is set. */
2581
2582 fprintf_unfiltered (tmp_stream,
2583 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2584 if (PIDGET (waiton_ptid) != -1)
2585 fprintf_unfiltered (tmp_stream,
2586 " [%s]", target_pid_to_str (waiton_ptid));
2587 fprintf_unfiltered (tmp_stream, ", status) =\n");
2588 fprintf_unfiltered (tmp_stream,
2589 "infrun: %d [%s],\n",
2590 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2591 fprintf_unfiltered (tmp_stream,
2592 "infrun: %s\n",
2593 status_string);
2594
2595 text = ui_file_xstrdup (tmp_stream, NULL);
2596
2597 /* This uses %s in part to handle %'s in the text, but also to avoid
2598 a gcc error: the format attribute requires a string literal. */
2599 fprintf_unfiltered (gdb_stdlog, "%s", text);
2600
2601 xfree (status_string);
2602 xfree (text);
2603 ui_file_delete (tmp_stream);
2604 }
2605
2606 /* Prepare and stabilize the inferior for detaching it. E.g.,
2607 detaching while a thread is displaced stepping is a recipe for
2608 crashing it, as nothing would readjust the PC out of the scratch
2609 pad. */
2610
2611 void
2612 prepare_for_detach (void)
2613 {
2614 struct inferior *inf = current_inferior ();
2615 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2616 struct cleanup *old_chain_1;
2617 struct displaced_step_inferior_state *displaced;
2618
2619 displaced = get_displaced_stepping_state (inf->pid);
2620
2621 /* Is any thread of this process displaced stepping? If not,
2622 there's nothing else to do. */
2623 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2624 return;
2625
2626 if (debug_infrun)
2627 fprintf_unfiltered (gdb_stdlog,
2628 "displaced-stepping in-process while detaching");
2629
2630 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2631 inf->detaching = 1;
2632
2633 while (!ptid_equal (displaced->step_ptid, null_ptid))
2634 {
2635 struct cleanup *old_chain_2;
2636 struct execution_control_state ecss;
2637 struct execution_control_state *ecs;
2638
2639 ecs = &ecss;
2640 memset (ecs, 0, sizeof (*ecs));
2641
2642 overlay_cache_invalid = 1;
2643
2644 if (deprecated_target_wait_hook)
2645 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2646 else
2647 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2648
2649 if (debug_infrun)
2650 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2651
2652 /* If an error happens while handling the event, propagate GDB's
2653 knowledge of the executing state to the frontend/user running
2654 state. */
2655 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2656 &minus_one_ptid);
2657
2658 /* Now figure out what to do with the result of the wait. */
2659 handle_inferior_event (ecs);
2660
2661 /* No error, don't finish the state yet. */
2662 discard_cleanups (old_chain_2);
2663
2664 /* Breakpoints and watchpoints are not installed on the target
2665 at this point, and signals are passed directly to the
2666 inferior, so this must mean the process is gone. */
2667 if (!ecs->wait_some_more)
2668 {
2669 discard_cleanups (old_chain_1);
2670 error (_("Program exited while detaching"));
2671 }
2672 }
2673
2674 discard_cleanups (old_chain_1);
2675 }
2676
2677 /* Wait for control to return from inferior to debugger.
2678
2679 If inferior gets a signal, we may decide to start it up again
2680 instead of returning. That is why there is a loop in this function.
2681 When this function actually returns it means the inferior
2682 should be left stopped and GDB should read more commands. */
2683
2684 void
2685 wait_for_inferior (void)
2686 {
2687 struct cleanup *old_cleanups;
2688
2689 if (debug_infrun)
2690 fprintf_unfiltered
2691 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2692
2693 old_cleanups =
2694 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2695
2696 while (1)
2697 {
2698 struct execution_control_state ecss;
2699 struct execution_control_state *ecs = &ecss;
2700 struct cleanup *old_chain;
2701
2702 memset (ecs, 0, sizeof (*ecs));
2703
2704 overlay_cache_invalid = 1;
2705
2706 if (deprecated_target_wait_hook)
2707 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2708 else
2709 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2710
2711 if (debug_infrun)
2712 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2713
2714 /* If an error happens while handling the event, propagate GDB's
2715 knowledge of the executing state to the frontend/user running
2716 state. */
2717 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2718
2719 /* Now figure out what to do with the result of the wait. */
2720 handle_inferior_event (ecs);
2721
2722 /* No error, don't finish the state yet. */
2723 discard_cleanups (old_chain);
2724
2725 if (!ecs->wait_some_more)
2726 break;
2727 }
2728
2729 do_cleanups (old_cleanups);
2730 }
2731
2732 /* Asynchronous version of wait_for_inferior. It is called by the
2733 event loop whenever a change of state is detected on the file
2734 descriptor corresponding to the target. It can be called more than
2735 once to complete a single execution command. In such cases we need
2736 to keep the state in a global variable ECSS. If it is the last time
2737 that this function is called for a single execution command, then
2738 report to the user that the inferior has stopped, and do the
2739 necessary cleanups. */
2740
2741 void
2742 fetch_inferior_event (void *client_data)
2743 {
2744 struct execution_control_state ecss;
2745 struct execution_control_state *ecs = &ecss;
2746 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2747 struct cleanup *ts_old_chain;
2748 int was_sync = sync_execution;
2749 int cmd_done = 0;
2750
2751 memset (ecs, 0, sizeof (*ecs));
2752
2753 /* We're handling a live event, so make sure we're doing live
2754 debugging. If we're looking at traceframes while the target is
2755 running, we're going to need to get back to that mode after
2756 handling the event. */
2757 if (non_stop)
2758 {
2759 make_cleanup_restore_current_traceframe ();
2760 set_current_traceframe (-1);
2761 }
2762
2763 if (non_stop)
2764 /* In non-stop mode, the user/frontend should not notice a thread
2765 switch due to internal events. Make sure we revert to the
2766 user-selected thread and frame after handling the event and
2767 running any breakpoint commands. */
2768 make_cleanup_restore_current_thread ();
2769
2770 overlay_cache_invalid = 1;
2771
2772 make_cleanup_restore_integer (&execution_direction);
2773 execution_direction = target_execution_direction ();
2774
2775 if (deprecated_target_wait_hook)
2776 ecs->ptid =
2777 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2778 else
2779 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2780
2781 if (debug_infrun)
2782 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2783
2784 /* If an error happens while handling the event, propagate GDB's
2785 knowledge of the executing state to the frontend/user running
2786 state. */
2787 if (!non_stop)
2788 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2789 else
2790 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2791
2792 /* This cleanup runs before make_cleanup_restore_current_thread above, so
2793 it still applies to the thread which has thrown the exception. */
2794 make_bpstat_clear_actions_cleanup ();
2795
2796 /* Now figure out what to do with the result of the wait. */
2797 handle_inferior_event (ecs);
2798
2799 if (!ecs->wait_some_more)
2800 {
2801 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2802
2803 delete_step_thread_step_resume_breakpoint ();
2804
2805 /* We may not find an inferior if this was a process exit. */
2806 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2807 normal_stop ();
2808
2809 if (target_has_execution
2810 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2811 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2812 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2813 && ecs->event_thread->step_multi
2814 && ecs->event_thread->control.stop_step)
2815 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2816 else
2817 {
2818 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2819 cmd_done = 1;
2820 }
2821 }
2822
2823 /* No error, don't finish the thread states yet. */
2824 discard_cleanups (ts_old_chain);
2825
2826 /* Revert thread and frame. */
2827 do_cleanups (old_chain);
2828
2829 /* If the inferior was in sync execution mode, and now isn't,
2830 restore the prompt (a synchronous execution command has finished,
2831 and we're ready for input). */
2832 if (interpreter_async && was_sync && !sync_execution)
2833 display_gdb_prompt (0);
2834
2835 if (cmd_done
2836 && !was_sync
2837 && exec_done_display_p
2838 && (ptid_equal (inferior_ptid, null_ptid)
2839 || !is_running (inferior_ptid)))
2840 printf_unfiltered (_("completed.\n"));
2841 }
2842
2843 /* Record the frame and location we're currently stepping through. */
2844 void
2845 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2846 {
2847 struct thread_info *tp = inferior_thread ();
2848
2849 tp->control.step_frame_id = get_frame_id (frame);
2850 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2851
2852 tp->current_symtab = sal.symtab;
2853 tp->current_line = sal.line;
2854 }
2855
2856 /* Clear context switchable stepping state. */
2857
2858 void
2859 init_thread_stepping_state (struct thread_info *tss)
2860 {
2861 tss->stepping_over_breakpoint = 0;
2862 tss->step_after_step_resume_breakpoint = 0;
2863 }
2864
2865 /* Return the cached copy of the last pid/waitstatus returned by
2866 target_wait()/deprecated_target_wait_hook(). The data is actually
2867 cached by handle_inferior_event(), which gets called immediately
2868 after target_wait()/deprecated_target_wait_hook(). */
2869
2870 void
2871 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2872 {
2873 *ptidp = target_last_wait_ptid;
2874 *status = target_last_waitstatus;
2875 }
2876
2877 void
2878 nullify_last_target_wait_ptid (void)
2879 {
2880 target_last_wait_ptid = minus_one_ptid;
2881 }
2882
2883 /* Switch thread contexts. */
2884
2885 static void
2886 context_switch (ptid_t ptid)
2887 {
2888 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
2889 {
2890 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2891 target_pid_to_str (inferior_ptid));
2892 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2893 target_pid_to_str (ptid));
2894 }
2895
2896 switch_to_thread (ptid);
2897 }
2898
2899 static void
2900 adjust_pc_after_break (struct execution_control_state *ecs)
2901 {
2902 struct regcache *regcache;
2903 struct gdbarch *gdbarch;
2904 struct address_space *aspace;
2905 CORE_ADDR breakpoint_pc;
2906
2907 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2908 we aren't, just return.
2909
2910 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2911 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2912 implemented by software breakpoints should be handled through the normal
2913 breakpoint layer.
2914
2915 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2916 different signals (SIGILL or SIGEMT for instance), but it is less
2917 clear where the PC is pointing afterwards. It may not match
2918 gdbarch_decr_pc_after_break. I don't know any specific target that
2919 generates these signals at breakpoints (the code has been in GDB since at
2920 least 1992) so I can not guess how to handle them here.
2921
2922 In earlier versions of GDB, a target with
2923 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2924 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2925 target with both of these set in GDB history, and it seems unlikely to be
2926 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2927
2928 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2929 return;
2930
2931 if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
2932 return;
2933
2934 /* In reverse execution, when a breakpoint is hit, the instruction
2935 under it has already been de-executed. The reported PC always
2936 points at the breakpoint address, so adjusting it further would
2937 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2938 architecture:
2939
2940 B1 0x08000000 : INSN1
2941 B2 0x08000001 : INSN2
2942 0x08000002 : INSN3
2943 PC -> 0x08000003 : INSN4
2944
2945 Say you're stopped at 0x08000003 as above. Reverse continuing
2946 from that point should hit B2 as below. Reading the PC when the
2947 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2948 been de-executed already.
2949
2950 B1 0x08000000 : INSN1
2951 B2 PC -> 0x08000001 : INSN2
2952 0x08000002 : INSN3
2953 0x08000003 : INSN4
2954
2955 We can't apply the same logic as for forward execution, because
2956 we would wrongly adjust the PC to 0x08000000, since there's a
2957 breakpoint at PC - 1. We'd then report a hit on B1, although
2958 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2959 behaviour. */
2960 if (execution_direction == EXEC_REVERSE)
2961 return;
2962
2963 /* If this target does not decrement the PC after breakpoints, then
2964 we have nothing to do. */
2965 regcache = get_thread_regcache (ecs->ptid);
2966 gdbarch = get_regcache_arch (regcache);
2967 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2968 return;
2969
2970 aspace = get_regcache_aspace (regcache);
2971
2972 /* Find the location where (if we've hit a breakpoint) the
2973 breakpoint would be. */
2974 breakpoint_pc = regcache_read_pc (regcache)
2975 - gdbarch_decr_pc_after_break (gdbarch);
2976
2977 /* Check whether there actually is a software breakpoint inserted at
2978 that location.
2979
2980 If in non-stop mode, a race condition is possible where we've
2981 removed a breakpoint, but stop events for that breakpoint were
2982 already queued and arrive later. To suppress those spurious
2983 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2984 and retire them after a number of stop events are reported. */
2985 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2986 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2987 {
2988 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
2989
2990 if (RECORD_IS_USED)
2991 record_full_gdb_operation_disable_set ();
2992
2993 /* When using hardware single-step, a SIGTRAP is reported for both
2994 a completed single-step and a software breakpoint. We need to
2995 differentiate between the two, as the latter needs adjusting
2996 but the former does not.
2997
2998 The SIGTRAP can be due to a completed hardware single-step only if
2999 - we didn't insert software single-step breakpoints
3000 - the thread to be examined is still the current thread
3001 - this thread is currently being stepped
3002
3003 If any of these conditions is not met, we must have stopped due
3004 to hitting a software breakpoint, and have to back up to the
3005 breakpoint address.
3006
3007 As a special case, we could have hardware single-stepped a
3008 software breakpoint. In this case (prev_pc == breakpoint_pc),
3009 we also need to back up to the breakpoint address. */
3010
3011 if (singlestep_breakpoints_inserted_p
3012 || !ptid_equal (ecs->ptid, inferior_ptid)
3013 || !currently_stepping (ecs->event_thread)
3014 || ecs->event_thread->prev_pc == breakpoint_pc)
3015 regcache_write_pc (regcache, breakpoint_pc);
3016
3017 do_cleanups (old_cleanups);
3018 }
3019 }
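
/* A concrete forward-execution example of the adjustment above,
   assuming a target where gdbarch_decr_pc_after_break () is 1 (the
   breakpoint instruction is one byte and the PC is left pointing just
   past it, as on x86): hitting a breakpoint planted at 0x08000000
   reports a PC of 0x08000001, and the code above rewinds it to
   0x08000000 so that bpstat checks and breakpoint re-insertion see
   the breakpoint address itself.  */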
3020
3021 static void
3022 init_infwait_state (void)
3023 {
3024 waiton_ptid = pid_to_ptid (-1);
3025 infwait_state = infwait_normal_state;
3026 }
3027
3028 static int
3029 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3030 {
3031 for (frame = get_prev_frame (frame);
3032 frame != NULL;
3033 frame = get_prev_frame (frame))
3034 {
3035 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3036 return 1;
3037 if (get_frame_type (frame) != INLINE_FRAME)
3038 break;
3039 }
3040
3041 return 0;
3042 }
3043
3044 /* Auxiliary function that handles syscall entry/return events.
3045 It returns 1 if the inferior should keep going (and GDB
3046 should ignore the event), or 0 if the event deserves to be
3047 processed. */
3048
3049 static int
3050 handle_syscall_event (struct execution_control_state *ecs)
3051 {
3052 struct regcache *regcache;
3053 int syscall_number;
3054
3055 if (!ptid_equal (ecs->ptid, inferior_ptid))
3056 context_switch (ecs->ptid);
3057
3058 regcache = get_thread_regcache (ecs->ptid);
3059 syscall_number = ecs->ws.value.syscall_number;
3060 stop_pc = regcache_read_pc (regcache);
3061
3062 if (catch_syscall_enabled () > 0
3063 && catching_syscall_number (syscall_number) > 0)
3064 {
3065 enum bpstat_signal_value sval;
3066
3067 if (debug_infrun)
3068 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3069 syscall_number);
3070
3071 ecs->event_thread->control.stop_bpstat
3072 = bpstat_stop_status (get_regcache_aspace (regcache),
3073 stop_pc, ecs->ptid, &ecs->ws);
3074
3075 sval = bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
3076 GDB_SIGNAL_TRAP);
3077 ecs->random_signal = sval == BPSTAT_SIGNAL_NO;
3078
3079 if (!ecs->random_signal)
3080 {
3081 /* Catchpoint hit. */
3082 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_TRAP;
3083 return 0;
3084 }
3085 }
3086
3087 /* If no catchpoint triggered for this, then keep going. */
3088 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3089 keep_going (ecs);
3090 return 1;
3091 }
3092
3093 /* Clear the supplied execution_control_state's stop_func_* fields. */
3094
3095 static void
3096 clear_stop_func (struct execution_control_state *ecs)
3097 {
3098 ecs->stop_func_filled_in = 0;
3099 ecs->stop_func_start = 0;
3100 ecs->stop_func_end = 0;
3101 ecs->stop_func_name = NULL;
3102 }
3103
3104 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3105
3106 static void
3107 fill_in_stop_func (struct gdbarch *gdbarch,
3108 struct execution_control_state *ecs)
3109 {
3110 if (!ecs->stop_func_filled_in)
3111 {
3112 /* Don't care about return value; stop_func_start and stop_func_name
3113 will both be 0 if it doesn't work. */
3114 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3115 &ecs->stop_func_start, &ecs->stop_func_end);
3116 ecs->stop_func_start
3117 += gdbarch_deprecated_function_start_offset (gdbarch);
3118
3119 ecs->stop_func_filled_in = 1;
3120 }
3121 }
3122
3123 /* Given an execution control state that has been freshly filled in
3124 by an event from the inferior, figure out what it means and take
3125 appropriate action. */
3126
3127 static void
3128 handle_inferior_event (struct execution_control_state *ecs)
3129 {
3130 struct frame_info *frame;
3131 struct gdbarch *gdbarch;
3132 int stopped_by_watchpoint;
3133 int stepped_after_stopped_by_watchpoint = 0;
3134 struct symtab_and_line stop_pc_sal;
3135 enum stop_kind stop_soon;
3136
3137 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3138 {
3139 /* We had an event in the inferior, but we are not interested in
3140 handling it at this level. The lower layers have already
3141 done what needs to be done, if anything.
3142
3143 One of the possible circumstances for this is when the
3144 inferior produces output for the console. The inferior has
3145 not stopped, and we are ignoring the event. Another possible
3146 circumstance is any event which the lower level knows will be
3147 reported multiple times without an intervening resume. */
3148 if (debug_infrun)
3149 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3150 prepare_to_wait (ecs);
3151 return;
3152 }
3153
3154 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3155 && target_can_async_p () && !sync_execution)
3156 {
3157 /* There were no unwaited-for children left in the target, but
3158 we're not synchronously waiting for events either. Just
3159 ignore. Otherwise, if we were running a synchronous
3160 execution command, we need to cancel it and give the user
3161 back the terminal. */
3162 if (debug_infrun)
3163 fprintf_unfiltered (gdb_stdlog,
3164 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3165 prepare_to_wait (ecs);
3166 return;
3167 }
3168
3169 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3170 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3171 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
3172 {
3173 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3174
3175 gdb_assert (inf);
3176 stop_soon = inf->control.stop_soon;
3177 }
3178 else
3179 stop_soon = NO_STOP_QUIETLY;
3180
3181 /* Cache the last pid/waitstatus. */
3182 target_last_wait_ptid = ecs->ptid;
3183 target_last_waitstatus = ecs->ws;
3184
3185 /* Always clear state belonging to the previous time we stopped. */
3186 stop_stack_dummy = STOP_NONE;
3187
3188 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3189 {
3190 /* No unwaited-for children left. IOW, all resumed children
3191 have exited. */
3192 if (debug_infrun)
3193 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3194
3195 stop_print_frame = 0;
3196 stop_stepping (ecs);
3197 return;
3198 }
3199
3200 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3201 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3202 {
3203 ecs->event_thread = find_thread_ptid (ecs->ptid);
3204 /* If it's a new thread, add it to the thread database. */
3205 if (ecs->event_thread == NULL)
3206 ecs->event_thread = add_thread (ecs->ptid);
3207
3208 /* Disable range stepping. If the next step request could use a
3209 range, this will end up re-enabled then. */
3210 ecs->event_thread->control.may_range_step = 0;
3211 }
3212
3213 /* Dependent on valid ECS->EVENT_THREAD. */
3214 adjust_pc_after_break (ecs);
3215
3216 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3217 reinit_frame_cache ();
3218
3219 breakpoint_retire_moribund ();
3220
3221 /* First, distinguish signals caused by the debugger from signals
3222 that have to do with the program's own actions. Note that
3223 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3224 on the operating system version. Here we detect when a SIGILL or
3225 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3226 something similar for SIGSEGV, since a SIGSEGV will be generated
3227 when we're trying to execute a breakpoint instruction on a
3228 non-executable stack. This happens for call dummy breakpoints
3229 for architectures like SPARC that place call dummies on the
3230 stack. */
3231 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3232 && (ecs->ws.value.sig == GDB_SIGNAL_ILL
3233 || ecs->ws.value.sig == GDB_SIGNAL_SEGV
3234 || ecs->ws.value.sig == GDB_SIGNAL_EMT))
3235 {
3236 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3237
3238 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3239 regcache_read_pc (regcache)))
3240 {
3241 if (debug_infrun)
3242 fprintf_unfiltered (gdb_stdlog,
3243 "infrun: Treating signal as SIGTRAP\n");
3244 ecs->ws.value.sig = GDB_SIGNAL_TRAP;
3245 }
3246 }
3247
3248 /* Mark the non-executing threads accordingly. In all-stop, all
3249 threads of all processes are stopped when we get any event
3250 reported. In non-stop mode, only the event thread stops. If
3251 we're handling a process exit in non-stop mode, there's nothing
3252 to do, as threads of the dead process are gone, and threads of
3253 any other process were left running. */
3254 if (!non_stop)
3255 set_executing (minus_one_ptid, 0);
3256 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3257 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3258 set_executing (ecs->ptid, 0);
3259
3260 switch (infwait_state)
3261 {
3262 case infwait_thread_hop_state:
3263 if (debug_infrun)
3264 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3265 break;
3266
3267 case infwait_normal_state:
3268 if (debug_infrun)
3269 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3270 break;
3271
3272 case infwait_step_watch_state:
3273 if (debug_infrun)
3274 fprintf_unfiltered (gdb_stdlog,
3275 "infrun: infwait_step_watch_state\n");
3276
3277 stepped_after_stopped_by_watchpoint = 1;
3278 break;
3279
3280 case infwait_nonstep_watch_state:
3281 if (debug_infrun)
3282 fprintf_unfiltered (gdb_stdlog,
3283 "infrun: infwait_nonstep_watch_state\n");
3284 insert_breakpoints ();
3285
3286 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3287 handle things like signals arriving and other things happening
3288 in combination correctly? */
3289 stepped_after_stopped_by_watchpoint = 1;
3290 break;
3291
3292 default:
3293 internal_error (__FILE__, __LINE__, _("bad switch"));
3294 }
3295
3296 infwait_state = infwait_normal_state;
3297 waiton_ptid = pid_to_ptid (-1);
3298
3299 switch (ecs->ws.kind)
3300 {
3301 case TARGET_WAITKIND_LOADED:
3302 if (debug_infrun)
3303 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3304 /* Ignore gracefully during startup of the inferior, as it might
3305 be the shell which has just loaded some objects; otherwise
3306 add the symbols for the newly loaded objects. Also ignore at
3307 the beginning of an attach or remote session; we will query
3308 the full list of libraries once the connection is
3309 established. */
3310 if (stop_soon == NO_STOP_QUIETLY)
3311 {
3312 struct regcache *regcache;
3313 enum bpstat_signal_value sval;
3314
3315 if (!ptid_equal (ecs->ptid, inferior_ptid))
3316 context_switch (ecs->ptid);
3317 regcache = get_thread_regcache (ecs->ptid);
3318
3319 handle_solib_event ();
3320
3321 ecs->event_thread->control.stop_bpstat
3322 = bpstat_stop_status (get_regcache_aspace (regcache),
3323 stop_pc, ecs->ptid, &ecs->ws);
3324
3325 sval
3326 = bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
3327 GDB_SIGNAL_TRAP);
3328 ecs->random_signal = sval == BPSTAT_SIGNAL_NO;
3329
3330 if (!ecs->random_signal)
3331 {
3332 /* A catchpoint triggered. */
3333 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_TRAP;
3334 goto process_event_stop_test;
3335 }
3336
3337 /* If requested, stop when the dynamic linker notifies
3338 gdb of events. This allows the user to get control
3339 and place breakpoints in initializer routines for
3340 dynamically loaded objects (among other things). */
3341 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3342 if (stop_on_solib_events)
3343 {
3344 /* Make sure we print "Stopped due to solib-event" in
3345 normal_stop. */
3346 stop_print_frame = 1;
3347
3348 stop_stepping (ecs);
3349 return;
3350 }
3351 }
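      /* Illustrative session for the branch above: stopping on every
	 dynamic-linker event is requested with
	     (gdb) set stop-on-solib-events 1
	     (gdb) run
	 after which each load/unload of shared libraries reported by
	 the target ends up in stop_stepping here.  */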
3352
3353 /* If we are skipping through a shell, or through shared library
3354 loading that we aren't interested in, resume the program. If
3355 we're running the program normally, also resume. But stop if
3356 we're attaching or setting up a remote connection. */
3357 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3358 {
3359 if (!ptid_equal (ecs->ptid, inferior_ptid))
3360 context_switch (ecs->ptid);
3361
3362 /* Loading of shared libraries might have changed breakpoint
3363 addresses. Make sure new breakpoints are inserted. */
3364 if (stop_soon == NO_STOP_QUIETLY
3365 && !breakpoints_always_inserted_mode ())
3366 insert_breakpoints ();
3367 resume (0, GDB_SIGNAL_0);
3368 prepare_to_wait (ecs);
3369 return;
3370 }
3371
3372 break;
3373
3374 case TARGET_WAITKIND_SPURIOUS:
3375 if (debug_infrun)
3376 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3377 if (!ptid_equal (ecs->ptid, inferior_ptid))
3378 context_switch (ecs->ptid);
3379 resume (0, GDB_SIGNAL_0);
3380 prepare_to_wait (ecs);
3381 return;
3382
3383 case TARGET_WAITKIND_EXITED:
3384 case TARGET_WAITKIND_SIGNALLED:
3385 if (debug_infrun)
3386 {
3387 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3388 fprintf_unfiltered (gdb_stdlog,
3389 "infrun: TARGET_WAITKIND_EXITED\n");
3390 else
3391 fprintf_unfiltered (gdb_stdlog,
3392 "infrun: TARGET_WAITKIND_SIGNALLED\n");
3393 }
3394
3395 inferior_ptid = ecs->ptid;
3396 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3397 set_current_program_space (current_inferior ()->pspace);
3398 handle_vfork_child_exec_or_exit (0);
3399 target_terminal_ours (); /* Must do this before mourn anyway. */
3400
3401 if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
3402 {
3403 /* Record the exit code in the convenience variable $_exitcode, so
3404 that the user can inspect this again later. */
3405 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3406 (LONGEST) ecs->ws.value.integer);
3407
3408 /* Also record this in the inferior itself. */
3409 current_inferior ()->has_exit_code = 1;
3410 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3411
3412 print_exited_reason (ecs->ws.value.integer);
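	  /* The convenience variable recorded above can be inspected
	     later from the CLI, e.g. (illustrative values):
		 (gdb) print $_exitcode
		 $1 = 0
	     after the inferior has exited.  */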
3413 }
3414 else
3415 print_signal_exited_reason (ecs->ws.value.sig);
3416
3417 gdb_flush (gdb_stdout);
3418 target_mourn_inferior ();
3419 singlestep_breakpoints_inserted_p = 0;
3420 cancel_single_step_breakpoints ();
3421 stop_print_frame = 0;
3422 stop_stepping (ecs);
3423 return;
3424
3425 /* The following are the only cases in which we keep going;
3426 the above cases end in a continue or goto. */
3427 case TARGET_WAITKIND_FORKED:
3428 case TARGET_WAITKIND_VFORKED:
3429 if (debug_infrun)
3430 {
3431 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3432 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3433 else
3434 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
3435 }
3436
3437 /* Check whether the inferior is displaced stepping. */
3438 {
3439 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3440 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3441 struct displaced_step_inferior_state *displaced
3442 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3443
3444 	/* If displaced stepping is in use, and thread ecs->ptid is the
3445 	   one doing the displaced step, clean up after it.  */
3446 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3447 {
3448 struct inferior *parent_inf
3449 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3450 struct regcache *child_regcache;
3451 CORE_ADDR parent_pc;
3452
3453 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3454 indicating that the displaced stepping of syscall instruction
3455 has been done. Perform cleanup for parent process here. Note
3456 that this operation also cleans up the child process for vfork,
3457 because their pages are shared. */
3458 displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);
3459
3460 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3461 {
3462 /* Restore scratch pad for child process. */
3463 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3464 }
3465
3466 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3467 the child's PC is also within the scratchpad. Set the child's PC
3468 to the parent's PC value, which has already been fixed up.
3469 FIXME: we use the parent's aspace here, although we're touching
3470 the child, because the child hasn't been added to the inferior
3471 list yet at this point. */
3472
3473 child_regcache
3474 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3475 gdbarch,
3476 parent_inf->aspace);
3477 /* Read PC value of parent process. */
3478 parent_pc = regcache_read_pc (regcache);
3479
3480 if (debug_displaced)
3481 fprintf_unfiltered (gdb_stdlog,
3482 "displaced: write child pc from %s to %s\n",
3483 paddress (gdbarch,
3484 regcache_read_pc (child_regcache)),
3485 paddress (gdbarch, parent_pc));
3486
3487 regcache_write_pc (child_regcache, parent_pc);
3488 }
3489 }
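      /* For reference: displaced stepping is controlled by the user
	 with "set displaced-stepping on|off|auto", and the child-PC
	 fixup above can be traced with "set debug displaced 1", which
	 prints the "displaced: write child pc from ... to ..." message
	 seen above.  */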
3490
3491 if (!ptid_equal (ecs->ptid, inferior_ptid))
3492 context_switch (ecs->ptid);
3493
3494 /* Immediately detach breakpoints from the child before there's
3495 any chance of letting the user delete breakpoints from the
3496 breakpoint lists. If we don't do this early, it's easy to
3497 	 leave leftover traps in the child, viz.: "break foo; catch
3498 	 fork; c; <fork>; del; c; <child calls foo>".  We only follow
3499 the fork on the last `continue', and by that time the
3500 breakpoint at "foo" is long gone from the breakpoint table.
3501 If we vforked, then we don't need to unpatch here, since both
3502 parent and child are sharing the same memory pages; we'll
3503 need to unpatch at follow/detach time instead to be certain
3504 that new breakpoints added between catchpoint hit time and
3505 vfork follow are detached. */
3506 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3507 {
3508 /* This won't actually modify the breakpoint list, but will
3509 physically remove the breakpoints from the child. */
3510 detach_breakpoints (ecs->ws.value.related_pid);
3511 }
3512
3513 if (singlestep_breakpoints_inserted_p)
3514 {
3515 /* Pull the single step breakpoints out of the target. */
3516 remove_single_step_breakpoints ();
3517 singlestep_breakpoints_inserted_p = 0;
3518 }
3519
3520 /* In case the event is caught by a catchpoint, remember that
3521 the event is to be followed at the next resume of the thread,
3522 and not immediately. */
3523 ecs->event_thread->pending_follow = ecs->ws;
3524
3525 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3526
3527 ecs->event_thread->control.stop_bpstat
3528 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3529 stop_pc, ecs->ptid, &ecs->ws);
3530
3531       /* Note that we're interested in knowing whether the bpstat actually
3532 	 causes a stop, not just whether it may explain the signal.
3533 Software watchpoints, for example, always appear in the
3534 bpstat. */
3535 ecs->random_signal
3536 = !bpstat_causes_stop (ecs->event_thread->control.stop_bpstat);
3537
3538 /* If no catchpoint triggered for this, then keep going. */
3539 if (ecs->random_signal)
3540 {
3541 ptid_t parent;
3542 ptid_t child;
3543 int should_resume;
3544 int follow_child
3545 = (follow_fork_mode_string == follow_fork_mode_child);
3546
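	  /* No catchpoint (e.g. "catch fork" / "catch vfork") claimed
	     this event, so the fork is followed according to the user
	     settings "set follow-fork-mode parent|child" and
	     "set detach-on-fork on|off" (illustrative commands;
	     follow_fork below does the actual work).  */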
3547 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3548
3549 should_resume = follow_fork ();
3550
3551 parent = ecs->ptid;
3552 child = ecs->ws.value.related_pid;
3553
3554 /* In non-stop mode, also resume the other branch. */
3555 if (non_stop && !detach_fork)
3556 {
3557 if (follow_child)
3558 switch_to_thread (parent);
3559 else
3560 switch_to_thread (child);
3561
3562 ecs->event_thread = inferior_thread ();
3563 ecs->ptid = inferior_ptid;
3564 keep_going (ecs);
3565 }
3566
3567 if (follow_child)
3568 switch_to_thread (child);
3569 else
3570 switch_to_thread (parent);
3571
3572 ecs->event_thread = inferior_thread ();
3573 ecs->ptid = inferior_ptid;
3574
3575 if (should_resume)
3576 keep_going (ecs);
3577 else
3578 stop_stepping (ecs);
3579 return;
3580 }
3581 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_TRAP;
3582 goto process_event_stop_test;
3583
3584 case TARGET_WAITKIND_VFORK_DONE:
3585 /* Done with the shared memory region. Re-insert breakpoints in
3586 the parent, and keep going. */
3587
3588 if (debug_infrun)
3589 fprintf_unfiltered (gdb_stdlog,
3590 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3591
3592 if (!ptid_equal (ecs->ptid, inferior_ptid))
3593 context_switch (ecs->ptid);
3594
3595 current_inferior ()->waiting_for_vfork_done = 0;
3596 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3597 /* This also takes care of reinserting breakpoints in the
3598 previously locked inferior. */
3599 keep_going (ecs);
3600 return;
3601
3602 case TARGET_WAITKIND_EXECD:
3603 if (debug_infrun)
3604 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3605
3606 if (!ptid_equal (ecs->ptid, inferior_ptid))
3607 context_switch (ecs->ptid);
3608
3609 singlestep_breakpoints_inserted_p = 0;
3610 cancel_single_step_breakpoints ();
3611
3612 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3613
3614 /* Do whatever is necessary to the parent branch of the vfork. */
3615 handle_vfork_child_exec_or_exit (1);
3616
3617 /* This causes the eventpoints and symbol table to be reset.
3618 Must do this now, before trying to determine whether to
3619 stop. */
3620 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3621
3622 ecs->event_thread->control.stop_bpstat
3623 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3624 stop_pc, ecs->ptid, &ecs->ws);
3625 ecs->random_signal
3626 = (bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
3627 GDB_SIGNAL_TRAP)
3628 == BPSTAT_SIGNAL_NO);
3629
3630 /* Note that this may be referenced from inside
3631 bpstat_stop_status above, through inferior_has_execd. */
3632 xfree (ecs->ws.value.execd_pathname);
3633 ecs->ws.value.execd_pathname = NULL;
3634
3635 /* If no catchpoint triggered for this, then keep going. */
3636 if (ecs->random_signal)
3637 {
3638 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3639 keep_going (ecs);
3640 return;
3641 }
3642 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_TRAP;
3643 goto process_event_stop_test;
3644
3645 /* Be careful not to try to gather much state about a thread
3646 that's in a syscall. It's frequently a losing proposition. */
3647 case TARGET_WAITKIND_SYSCALL_ENTRY:
3648 if (debug_infrun)
3649 fprintf_unfiltered (gdb_stdlog,
3650 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3651       /* Get the current syscall number.  */
3652 if (handle_syscall_event (ecs) != 0)
3653 return;
3654 goto process_event_stop_test;
3655
3656 /* Before examining the threads further, step this thread to
3657 get it entirely out of the syscall. (We get notice of the
3658 event when the thread is just on the verge of exiting a
3659 syscall. Stepping one instruction seems to get it back
3660 into user code.) */
3661 case TARGET_WAITKIND_SYSCALL_RETURN:
3662 if (debug_infrun)
3663 fprintf_unfiltered (gdb_stdlog,
3664 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3665 if (handle_syscall_event (ecs) != 0)
3666 return;
3667 goto process_event_stop_test;
3668
3669 case TARGET_WAITKIND_STOPPED:
3670 if (debug_infrun)
3671 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3672 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3673 break;
3674
3675 case TARGET_WAITKIND_NO_HISTORY:
3676 if (debug_infrun)
3677 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3678 /* Reverse execution: target ran out of history info. */
3679
3680 /* Pull the single step breakpoints out of the target. */
3681 if (singlestep_breakpoints_inserted_p)
3682 {
3683 if (!ptid_equal (ecs->ptid, inferior_ptid))
3684 context_switch (ecs->ptid);
3685 remove_single_step_breakpoints ();
3686 singlestep_breakpoints_inserted_p = 0;
3687 }
3688 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3689 print_no_history_reason ();
3690 stop_stepping (ecs);
3691 return;
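      /* This event is only seen when executing backwards past the start
	 of the recorded history, e.g. (illustrative) after "record"
	 followed by repeated "reverse-step" or "reverse-continue";
	 print_no_history_reason above tells the user that no more
	 reverse-execution history is available.  */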
3692 }
3693
3694 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3695 {
3696 /* Do we need to clean up the state of a thread that has
3697 completed a displaced single-step? (Doing so usually affects
3698 the PC, so do it here, before we set stop_pc.) */
3699 displaced_step_fixup (ecs->ptid,
3700 ecs->event_thread->suspend.stop_signal);
3701
3702 /* If we either finished a single-step or hit a breakpoint, but
3703 the user wanted this thread to be stopped, pretend we got a
3704 SIG0 (generic unsignaled stop). */
3705
3706 if (ecs->event_thread->stop_requested
3707 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3708 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3709 }
3710
3711 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3712
3713 if (debug_infrun)
3714 {
3715 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3716 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3717 struct cleanup *old_chain = save_inferior_ptid ();
3718
3719 inferior_ptid = ecs->ptid;
3720
3721 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3722 paddress (gdbarch, stop_pc));
3723 if (target_stopped_by_watchpoint ())
3724 {
3725 CORE_ADDR addr;
3726
3727 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3728
3729 if (target_stopped_data_address (&current_target, &addr))
3730 fprintf_unfiltered (gdb_stdlog,
3731 "infrun: stopped data address = %s\n",
3732 paddress (gdbarch, addr));
3733 else
3734 fprintf_unfiltered (gdb_stdlog,
3735 "infrun: (no data address available)\n");
3736 }
3737
3738 do_cleanups (old_chain);
3739 }
3740
3741 if (stepping_past_singlestep_breakpoint)
3742 {
3743 gdb_assert (singlestep_breakpoints_inserted_p);
3744 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3745 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3746
3747 stepping_past_singlestep_breakpoint = 0;
3748
3749 /* We've either finished single-stepping past the single-step
3750 breakpoint, or stopped for some other reason. It would be nice if
3751 we could tell, but we can't reliably. */
3752 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3753 {
3754 if (debug_infrun)
3755 fprintf_unfiltered (gdb_stdlog,
3756 "infrun: stepping_past_"
3757 "singlestep_breakpoint\n");
3758 /* Pull the single step breakpoints out of the target. */
3759 if (!ptid_equal (ecs->ptid, inferior_ptid))
3760 context_switch (ecs->ptid);
3761 remove_single_step_breakpoints ();
3762 singlestep_breakpoints_inserted_p = 0;
3763
3764 ecs->random_signal = 0;
3765 ecs->event_thread->control.trap_expected = 0;
3766
3767 context_switch (saved_singlestep_ptid);
3768 if (deprecated_context_hook)
3769 deprecated_context_hook (pid_to_thread_id (saved_singlestep_ptid));
3770
3771 resume (1, GDB_SIGNAL_0);
3772 prepare_to_wait (ecs);
3773 return;
3774 }
3775 }
3776
3777 if (!ptid_equal (deferred_step_ptid, null_ptid))
3778 {
3779 /* In non-stop mode, there's never a deferred_step_ptid set. */
3780 gdb_assert (!non_stop);
3781
3782 /* If we stopped for some other reason than single-stepping, ignore
3783 the fact that we were supposed to switch back. */
3784 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3785 {
3786 if (debug_infrun)
3787 fprintf_unfiltered (gdb_stdlog,
3788 "infrun: handling deferred step\n");
3789
3790 /* Pull the single step breakpoints out of the target. */
3791 if (singlestep_breakpoints_inserted_p)
3792 {
3793 if (!ptid_equal (ecs->ptid, inferior_ptid))
3794 context_switch (ecs->ptid);
3795 remove_single_step_breakpoints ();
3796 singlestep_breakpoints_inserted_p = 0;
3797 }
3798
3799 ecs->event_thread->control.trap_expected = 0;
3800
3801 context_switch (deferred_step_ptid);
3802 deferred_step_ptid = null_ptid;
3803 /* Suppress spurious "Switching to ..." message. */
3804 previous_inferior_ptid = inferior_ptid;
3805
3806 resume (1, GDB_SIGNAL_0);
3807 prepare_to_wait (ecs);
3808 return;
3809 }
3810
3811 deferred_step_ptid = null_ptid;
3812 }
3813
3814 /* See if a thread hit a thread-specific breakpoint that was meant for
3815 another thread. If so, then step that thread past the breakpoint,
3816 and continue it. */
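  /* Thread-specific breakpoints are created with commands such as
     (illustrative) "break worker_loop thread 2"; if some other thread
     executes that address, the code below steps it past the breakpoint
     (a "thread hop") and resumes it without reporting a user-visible
     stop.  */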
3817
3818 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
3819 {
3820 int thread_hop_needed = 0;
3821 struct address_space *aspace =
3822 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3823
3824 /* Check if a regular breakpoint has been hit before checking
3825 for a potential single step breakpoint. Otherwise, GDB will
3826 not see this breakpoint hit when stepping onto breakpoints. */
3827 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3828 {
3829 ecs->random_signal = 0;
3830 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3831 thread_hop_needed = 1;
3832 }
3833 else if (singlestep_breakpoints_inserted_p)
3834 {
3835 /* We have not context switched yet, so this should be true
3836 no matter which thread hit the singlestep breakpoint. */
3837 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3838 if (debug_infrun)
3839 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3840 "trap for %s\n",
3841 target_pid_to_str (ecs->ptid));
3842
3843 ecs->random_signal = 0;
3844 /* The call to in_thread_list is necessary because PTIDs sometimes
3845 change when we go from single-threaded to multi-threaded. If
3846 the singlestep_ptid is still in the list, assume that it is
3847 really different from ecs->ptid. */
3848 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3849 && in_thread_list (singlestep_ptid))
3850 {
3851 /* If the PC of the thread we were trying to single-step
3852 has changed, discard this event (which we were going
3853 to ignore anyway), and pretend we saw that thread
3854 trap. This prevents us continuously moving the
3855 single-step breakpoint forward, one instruction at a
3856 time. If the PC has changed, then the thread we were
3857 trying to single-step has trapped or been signalled,
3858 but the event has not been reported to GDB yet.
3859
3860 There might be some cases where this loses signal
3861 information, if a signal has arrived at exactly the
3862 same time that the PC changed, but this is the best
3863 we can do with the information available. Perhaps we
3864 should arrange to report all events for all threads
3865 when they stop, or to re-poll the remote looking for
3866 this particular thread (i.e. temporarily enable
3867 schedlock). */
3868
3869 CORE_ADDR new_singlestep_pc
3870 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3871
3872 if (new_singlestep_pc != singlestep_pc)
3873 {
3874 enum gdb_signal stop_signal;
3875
3876 if (debug_infrun)
3877 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3878 " but expected thread advanced also\n");
3879
3880 /* The current context still belongs to
3881 singlestep_ptid. Don't swap here, since that's
3882 the context we want to use. Just fudge our
3883 state and continue. */
3884 stop_signal = ecs->event_thread->suspend.stop_signal;
3885 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
3886 ecs->ptid = singlestep_ptid;
3887 ecs->event_thread = find_thread_ptid (ecs->ptid);
3888 ecs->event_thread->suspend.stop_signal = stop_signal;
3889 stop_pc = new_singlestep_pc;
3890 }
3891 else
3892 {
3893 if (debug_infrun)
3894 fprintf_unfiltered (gdb_stdlog,
3895 "infrun: unexpected thread\n");
3896
3897 thread_hop_needed = 1;
3898 stepping_past_singlestep_breakpoint = 1;
3899 saved_singlestep_ptid = singlestep_ptid;
3900 }
3901 }
3902 }
3903
3904 if (thread_hop_needed)
3905 {
3906 struct regcache *thread_regcache;
3907 int remove_status = 0;
3908
3909 if (debug_infrun)
3910 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3911
3912 /* Switch context before touching inferior memory, the
3913 previous thread may have exited. */
3914 if (!ptid_equal (inferior_ptid, ecs->ptid))
3915 context_switch (ecs->ptid);
3916
3917 /* Saw a breakpoint, but it was hit by the wrong thread.
3918 Just continue. */
3919
3920 if (singlestep_breakpoints_inserted_p)
3921 {
3922 /* Pull the single step breakpoints out of the target. */
3923 remove_single_step_breakpoints ();
3924 singlestep_breakpoints_inserted_p = 0;
3925 }
3926
3927 /* If the arch can displace step, don't remove the
3928 breakpoints. */
3929 thread_regcache = get_thread_regcache (ecs->ptid);
3930 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3931 remove_status = remove_breakpoints ();
3932
3933 /* Did we fail to remove breakpoints? If so, try
3934 to set the PC past the bp. (There's at least
3935 one situation in which we can fail to remove
3936 the bp's: On HP-UX's that use ttrace, we can't
3937 change the address space of a vforking child
3938 process until the child exits (well, okay, not
3939 then either :-) or execs. */
3940 if (remove_status != 0)
3941 error (_("Cannot step over breakpoint hit in wrong thread"));
3942 else
3943 { /* Single step */
3944 if (!non_stop)
3945 {
3946 /* Only need to require the next event from this
3947 thread in all-stop mode. */
3948 waiton_ptid = ecs->ptid;
3949 infwait_state = infwait_thread_hop_state;
3950 }
3951
3952 ecs->event_thread->stepping_over_breakpoint = 1;
3953 keep_going (ecs);
3954 return;
3955 }
3956 }
3957 else if (singlestep_breakpoints_inserted_p)
3958 {
3959 ecs->random_signal = 0;
3960 }
3961 }
3962 else
3963 ecs->random_signal = 1;
3964
3965 /* See if something interesting happened to the non-current thread. If
3966 so, then switch to that thread. */
3967 if (!ptid_equal (ecs->ptid, inferior_ptid))
3968 {
3969 if (debug_infrun)
3970 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3971
3972 context_switch (ecs->ptid);
3973
3974 if (deprecated_context_hook)
3975 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3976 }
3977
3978 /* At this point, get hold of the now-current thread's frame. */
3979 frame = get_current_frame ();
3980 gdbarch = get_frame_arch (frame);
3981
3982 if (singlestep_breakpoints_inserted_p)
3983 {
3984 /* Pull the single step breakpoints out of the target. */
3985 remove_single_step_breakpoints ();
3986 singlestep_breakpoints_inserted_p = 0;
3987 }
3988
3989 if (stepped_after_stopped_by_watchpoint)
3990 stopped_by_watchpoint = 0;
3991 else
3992 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3993
3994 /* If necessary, step over this watchpoint. We'll be back to display
3995 it in a moment. */
3996 if (stopped_by_watchpoint
3997 && (target_have_steppable_watchpoint
3998 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3999 {
4000 /* At this point, we are stopped at an instruction which has
4001 attempted to write to a piece of memory under control of
4002 a watchpoint. The instruction hasn't actually executed
4003 yet. If we were to evaluate the watchpoint expression
4004 now, we would get the old value, and therefore no change
4005 would seem to have occurred.
4006
4007 In order to make watchpoints work `right', we really need
4008 to complete the memory write, and then evaluate the
4009 watchpoint expression. We do this by single-stepping the
4010 target.
4011
4012 	 It may not be necessary to disable the watchpoint to step over
4013 it. For example, the PA can (with some kernel cooperation)
4014 single step over a watchpoint without disabling the watchpoint.
4015
4016 It is far more common to need to disable a watchpoint to step
4017 the inferior over it. If we have non-steppable watchpoints,
4018 we must disable the current watchpoint; it's simplest to
4019 disable all watchpoints and breakpoints. */
4020 int hw_step = 1;
4021
4022 if (!target_have_steppable_watchpoint)
4023 {
4024 remove_breakpoints ();
4025 /* See comment in resume why we need to stop bypassing signals
4026 while breakpoints have been removed. */
4027 target_pass_signals (0, NULL);
4028 }
4029 /* Single step */
4030 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4031 target_resume (ecs->ptid, hw_step, GDB_SIGNAL_0);
4032 waiton_ptid = ecs->ptid;
4033 if (target_have_steppable_watchpoint)
4034 infwait_state = infwait_step_watch_state;
4035 else
4036 infwait_state = infwait_nonstep_watch_state;
4037 prepare_to_wait (ecs);
4038 return;
4039 }
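  /* Illustrative scenario for the block above: a hardware watchpoint
     set with "watch some_global" on a target whose watchpoints are not
     steppable.  GDB removes its breakpoints, single-steps the faulting
     store so that it actually completes, and only then re-evaluates the
     watchpoint expression, so the reported old/new values reflect the
     finished write.  */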
4040
4041 clear_stop_func (ecs);
4042 ecs->event_thread->stepping_over_breakpoint = 0;
4043 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4044 ecs->event_thread->control.stop_step = 0;
4045 stop_print_frame = 1;
4046 ecs->random_signal = 0;
4047 stopped_by_random_signal = 0;
4048
4049 /* Hide inlined functions starting here, unless we just performed stepi or
4050 nexti. After stepi and nexti, always show the innermost frame (not any
4051 inline function call sites). */
4052 if (ecs->event_thread->control.step_range_end != 1)
4053 {
4054 struct address_space *aspace =
4055 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4056
4057 /* skip_inline_frames is expensive, so we avoid it if we can
4058 determine that the address is one where functions cannot have
4059 been inlined. This improves performance with inferiors that
4060 load a lot of shared libraries, because the solib event
4061 breakpoint is defined as the address of a function (i.e. not
4062 inline). Note that we have to check the previous PC as well
4063 as the current one to catch cases when we have just
4064 single-stepped off a breakpoint prior to reinstating it.
4065 Note that we're assuming that the code we single-step to is
4066 not inline, but that's not definitive: there's nothing
4067 preventing the event breakpoint function from containing
4068 inlined code, and the single-step ending up there. If the
4069 user had set a breakpoint on that inlined code, the missing
4070 skip_inline_frames call would break things. Fortunately
4071 that's an extremely unlikely scenario. */
4072 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4073 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4074 && ecs->event_thread->control.trap_expected
4075 && pc_at_non_inline_function (aspace,
4076 ecs->event_thread->prev_pc,
4077 &ecs->ws)))
4078 {
4079 skip_inline_frames (ecs->ptid);
4080
4081 /* Re-fetch current thread's frame in case that invalidated
4082 the frame cache. */
4083 frame = get_current_frame ();
4084 gdbarch = get_frame_arch (frame);
4085 }
4086 }
4087
4088 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4089 && ecs->event_thread->control.trap_expected
4090 && gdbarch_single_step_through_delay_p (gdbarch)
4091 && currently_stepping (ecs->event_thread))
4092 {
4093 /* We're trying to step off a breakpoint. Turns out that we're
4094 also on an instruction that needs to be stepped multiple
4095 	 times before it's been fully executed.  E.g., architectures
4096 with a delay slot. It needs to be stepped twice, once for
4097 the instruction and once for the delay slot. */
4098 int step_through_delay
4099 = gdbarch_single_step_through_delay (gdbarch, frame);
4100
4101 if (debug_infrun && step_through_delay)
4102 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4103 if (ecs->event_thread->control.step_range_end == 0
4104 && step_through_delay)
4105 {
4106 /* The user issued a continue when stopped at a breakpoint.
4107 Set up for another trap and get out of here. */
4108 ecs->event_thread->stepping_over_breakpoint = 1;
4109 keep_going (ecs);
4110 return;
4111 }
4112 else if (step_through_delay)
4113 {
4114 /* The user issued a step when stopped at a breakpoint.
4115 Maybe we should stop, maybe we should not - the delay
4116 slot *might* correspond to a line of source. In any
4117 case, don't decide that here, just set
4118 	     ecs->event_thread->stepping_over_breakpoint, making sure we
4119 single-step again before breakpoints are re-inserted. */
4120 ecs->event_thread->stepping_over_breakpoint = 1;
4121 }
4122 }
4123
4124 /* Look at the cause of the stop, and decide what to do.
4125 The alternatives are:
4126 1) stop_stepping and return; to really stop and return to the debugger,
4127 2) keep_going and return to start up again
4128 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
4129 3) set ecs->random_signal to 1, and the decision between 1 and 2
4130 will be made according to the signal handling tables. */
4131
4132 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4133 && stop_after_trap)
4134 {
4135 if (debug_infrun)
4136 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4137 stop_print_frame = 0;
4138 stop_stepping (ecs);
4139 return;
4140 }
4141
4142   /* This originates from start_remote(), start_inferior() and
4143      the shared library hook functions.  */
4144 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4145 {
4146 if (debug_infrun)
4147 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4148 stop_stepping (ecs);
4149 return;
4150 }
4151
4152 /* This originates from attach_command(). We need to overwrite
4153 the stop_signal here, because some kernels don't ignore a
4154 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4155 See more comments in inferior.h. On the other hand, if we
4156 get a non-SIGSTOP, report it to the user - assume the backend
4157 will handle the SIGSTOP if it should show up later.
4158
4159 Also consider that the attach is complete when we see a
4160 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4161 target extended-remote report it instead of a SIGSTOP
4162 (e.g. gdbserver). We already rely on SIGTRAP being our
4163 signal, so this is no exception.
4164
4165 Also consider that the attach is complete when we see a
4166 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4167 the target to stop all threads of the inferior, in case the
4168 low level attach operation doesn't stop them implicitly. If
4169 they weren't stopped implicitly, then the stub will report a
4170 GDB_SIGNAL_0, meaning: stopped for no particular reason
4171 other than GDB's request. */
4172 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4173 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
4174 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4175 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
4176 {
4177 stop_stepping (ecs);
4178 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4179 return;
4180 }
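  /* In practice this is the first stop reported after "attach PID" (or
     a remote attach): the initial SIGSTOP, SIGTRAP or GDB_SIGNAL_0
     merely marks the attach as complete and is swallowed here rather
     than being passed on to the inferior.  */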
4181
4182 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4183 handles this event. */
4184 ecs->event_thread->control.stop_bpstat
4185 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4186 stop_pc, ecs->ptid, &ecs->ws);
4187
4188   /* The following is in case the breakpoint condition called a
4189      function.  */
4190 stop_print_frame = 1;
4191
4192 /* This is where we handle "moribund" watchpoints. Unlike
4193      software breakpoint traps, hardware watchpoint traps are
4194 always distinguishable from random traps. If no high-level
4195 watchpoint is associated with the reported stop data address
4196 anymore, then the bpstat does not explain the signal ---
4197 simply make sure to ignore it if `stopped_by_watchpoint' is
4198 set. */
4199
4200 if (debug_infrun
4201 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4202 && (bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4203 GDB_SIGNAL_TRAP)
4204 == BPSTAT_SIGNAL_NO)
4205 && stopped_by_watchpoint)
4206 fprintf_unfiltered (gdb_stdlog,
4207 "infrun: no user watchpoint explains "
4208 "watchpoint SIGTRAP, ignoring\n");
4209
4210 /* NOTE: cagney/2003-03-29: These two checks for a random signal
4211 at one stage in the past included checks for an inferior
4212 function call's call dummy's return breakpoint. The original
4213 comment, that went with the test, read:
4214
4215 ``End of a stack dummy. Some systems (e.g. Sony news) give
4216 another signal besides SIGTRAP, so check here as well as
4217 above.''
4218
4219      If someone ever tries to get call dummies on a
4220 non-executable stack to work (where the target would stop
4221 with something like a SIGSEGV), then those tests might need
4222 to be re-instated. Given, however, that the tests were only
4223 enabled when momentary breakpoints were not being used, I
4224 suspect that it won't be the case.
4225
4226 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4227 be necessary for call dummies on a non-executable stack on
4228 SPARC. */
4229
4230 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4231 ecs->random_signal
4232 = !((bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4233 GDB_SIGNAL_TRAP)
4234 != BPSTAT_SIGNAL_NO)
4235 || stopped_by_watchpoint
4236 || ecs->event_thread->control.trap_expected
4237 || (ecs->event_thread->control.step_range_end
4238 && (ecs->event_thread->control.step_resume_breakpoint
4239 == NULL)));
4240 else
4241 {
4242 enum bpstat_signal_value sval;
4243
4244 sval = bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4245 ecs->event_thread->suspend.stop_signal);
4246 ecs->random_signal = (sval == BPSTAT_SIGNAL_NO);
4247
4248 if (sval == BPSTAT_SIGNAL_HIDE)
4249 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_TRAP;
4250 }
4251
4252 process_event_stop_test:
4253
4254 /* Re-fetch current thread's frame in case we did a
4255 "goto process_event_stop_test" above. */
4256 frame = get_current_frame ();
4257 gdbarch = get_frame_arch (frame);
4258
4259 /* For the program's own signals, act according to
4260 the signal handling tables. */
4261
4262 if (ecs->random_signal)
4263 {
4264 /* Signal not for debugging purposes. */
4265 int printed = 0;
4266 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4267
4268 if (debug_infrun)
4269 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
4270 ecs->event_thread->suspend.stop_signal);
4271
4272 stopped_by_random_signal = 1;
4273
4274 if (signal_print[ecs->event_thread->suspend.stop_signal])
4275 {
4276 printed = 1;
4277 target_terminal_ours_for_output ();
4278 print_signal_received_reason
4279 (ecs->event_thread->suspend.stop_signal);
4280 }
4281 /* Always stop on signals if we're either just gaining control
4282 of the program, or the user explicitly requested this thread
4283 to remain stopped. */
4284 if (stop_soon != NO_STOP_QUIETLY
4285 || ecs->event_thread->stop_requested
4286 || (!inf->detaching
4287 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4288 {
4289 stop_stepping (ecs);
4290 return;
4291 }
4292 /* If not going to stop, give terminal back
4293 if we took it away. */
4294 else if (printed)
4295 target_terminal_inferior ();
4296
4297 /* Clear the signal if it should not be passed. */
4298 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4299 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
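      /* The signal_stop/signal_print/signal_program tables consulted
	 above are the ones edited by the "handle" command, e.g.
	 (illustrative):
	     (gdb) handle SIGUSR1 nostop noprint pass
	     (gdb) info signals SIGUSR1
	 with which this path resumes silently and delivers the signal
	 to the inferior.  */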
4300
4301 if (ecs->event_thread->prev_pc == stop_pc
4302 && ecs->event_thread->control.trap_expected
4303 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4304 {
4305 /* We were just starting a new sequence, attempting to
4306 single-step off of a breakpoint and expecting a SIGTRAP.
4307 Instead this signal arrives. This signal will take us out
4308 of the stepping range so GDB needs to remember to, when
4309 the signal handler returns, resume stepping off that
4310 breakpoint. */
4311 /* To simplify things, "continue" is forced to use the same
4312 code paths as single-step - set a breakpoint at the
4313 signal return address and then, once hit, step off that
4314 breakpoint. */
4315 if (debug_infrun)
4316 fprintf_unfiltered (gdb_stdlog,
4317 "infrun: signal arrived while stepping over "
4318 "breakpoint\n");
4319
4320 insert_hp_step_resume_breakpoint_at_frame (frame);
4321 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4322 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4323 ecs->event_thread->control.trap_expected = 0;
4324 keep_going (ecs);
4325 return;
4326 }
4327
4328 if (ecs->event_thread->control.step_range_end != 0
4329 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4330 && pc_in_thread_step_range (stop_pc, ecs->event_thread)
4331 && frame_id_eq (get_stack_frame_id (frame),
4332 ecs->event_thread->control.step_stack_frame_id)
4333 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4334 {
4335 /* The inferior is about to take a signal that will take it
4336 out of the single step range. Set a breakpoint at the
4337 current PC (which is presumably where the signal handler
4338 will eventually return) and then allow the inferior to
4339 run free.
4340
4341 Note that this is only needed for a signal delivered
4342 while in the single-step range. Nested signals aren't a
4343 problem as they eventually all return. */
4344 if (debug_infrun)
4345 fprintf_unfiltered (gdb_stdlog,
4346 "infrun: signal may take us out of "
4347 "single-step range\n");
4348
4349 insert_hp_step_resume_breakpoint_at_frame (frame);
4350 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4351 ecs->event_thread->control.trap_expected = 0;
4352 keep_going (ecs);
4353 return;
4354 }
4355
4356       /* Note: step_resume_breakpoint may be non-NULL.  This occurs
4357 when either there's a nested signal, or when there's a
4358 pending signal enabled just as the signal handler returns
4359 (leaving the inferior at the step-resume-breakpoint without
4360 actually executing it). Either way continue until the
4361 breakpoint is really hit. */
4362 }
4363 else
4364 {
4365 /* Handle cases caused by hitting a breakpoint. */
4366
4367 CORE_ADDR jmp_buf_pc;
4368 struct bpstat_what what;
4369
4370 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4371
4372 if (what.call_dummy)
4373 {
4374 stop_stack_dummy = what.call_dummy;
4375 }
4376
4377 /* If we hit an internal event that triggers symbol changes, the
4378 current frame will be invalidated within bpstat_what (e.g.,
4379 if we hit an internal solib event). Re-fetch it. */
4380 frame = get_current_frame ();
4381 gdbarch = get_frame_arch (frame);
4382
4383 switch (what.main_action)
4384 {
4385 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4386 /* If we hit the breakpoint at longjmp while stepping, we
4387 install a momentary breakpoint at the target of the
4388 jmp_buf. */
4389
4390 if (debug_infrun)
4391 fprintf_unfiltered (gdb_stdlog,
4392 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4393
4394 ecs->event_thread->stepping_over_breakpoint = 1;
4395
4396 if (what.is_longjmp)
4397 {
4398 struct value *arg_value;
4399
4400 /* If we set the longjmp breakpoint via a SystemTap
4401 probe, then use it to extract the arguments. The
4402 destination PC is the third argument to the
4403 probe. */
4404 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4405 if (arg_value)
4406 jmp_buf_pc = value_as_address (arg_value);
4407 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4408 || !gdbarch_get_longjmp_target (gdbarch,
4409 frame, &jmp_buf_pc))
4410 {
4411 if (debug_infrun)
4412 fprintf_unfiltered (gdb_stdlog,
4413 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4414 "(!gdbarch_get_longjmp_target)\n");
4415 keep_going (ecs);
4416 return;
4417 }
4418
4419 /* Insert a breakpoint at resume address. */
4420 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4421 }
4422 else
4423 check_exception_resume (ecs, frame);
4424 keep_going (ecs);
4425 return;
4426
4427 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4428 {
4429 struct frame_info *init_frame;
4430
4431 /* There are several cases to consider.
4432
4433 1. The initiating frame no longer exists. In this case
4434 we must stop, because the exception or longjmp has gone
4435 too far.
4436
4437 2. The initiating frame exists, and is the same as the
4438 current frame. We stop, because the exception or
4439 longjmp has been caught.
4440
4441 3. The initiating frame exists and is different from
4442 the current frame. This means the exception or longjmp
4443 has been caught beneath the initiating frame, so keep
4444 going.
4445
4446 4. longjmp breakpoint has been placed just to protect
4447 	     against stale dummy frames and the user is not interested
4448 in stopping around longjmps. */
4449
4450 if (debug_infrun)
4451 fprintf_unfiltered (gdb_stdlog,
4452 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4453
4454 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4455 != NULL);
4456 delete_exception_resume_breakpoint (ecs->event_thread);
4457
4458 if (what.is_longjmp)
4459 {
4460 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread->num);
4461
4462 if (!frame_id_p (ecs->event_thread->initiating_frame))
4463 {
4464 /* Case 4. */
4465 keep_going (ecs);
4466 return;
4467 }
4468 }
4469
4470 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4471
4472 if (init_frame)
4473 {
4474 struct frame_id current_id
4475 = get_frame_id (get_current_frame ());
4476 if (frame_id_eq (current_id,
4477 ecs->event_thread->initiating_frame))
4478 {
4479 /* Case 2. Fall through. */
4480 }
4481 else
4482 {
4483 /* Case 3. */
4484 keep_going (ecs);
4485 return;
4486 }
4487 }
4488
4489 /* For Cases 1 and 2, remove the step-resume breakpoint,
4490 if it exists. */
4491 delete_step_resume_breakpoint (ecs->event_thread);
4492
4493 ecs->event_thread->control.stop_step = 1;
4494 print_end_stepping_range_reason ();
4495 stop_stepping (ecs);
4496 }
4497 return;
4498
4499 case BPSTAT_WHAT_SINGLE:
4500 if (debug_infrun)
4501 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4502 ecs->event_thread->stepping_over_breakpoint = 1;
4503 /* Still need to check other stuff, at least the case where
4504 we are stepping and step out of the right range. */
4505 break;
4506
4507 case BPSTAT_WHAT_STEP_RESUME:
4508 if (debug_infrun)
4509 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4510
4511 delete_step_resume_breakpoint (ecs->event_thread);
4512 if (ecs->event_thread->control.proceed_to_finish
4513 && execution_direction == EXEC_REVERSE)
4514 {
4515 struct thread_info *tp = ecs->event_thread;
4516
4517 /* We are finishing a function in reverse, and just hit
4518 the step-resume breakpoint at the start address of
4519 the function, and we're almost there -- just need to
4520 back up by one more single-step, which should take us
4521 back to the function call. */
4522 tp->control.step_range_start = tp->control.step_range_end = 1;
4523 keep_going (ecs);
4524 return;
4525 }
4526 fill_in_stop_func (gdbarch, ecs);
4527 if (stop_pc == ecs->stop_func_start
4528 && execution_direction == EXEC_REVERSE)
4529 {
4530 /* We are stepping over a function call in reverse, and
4531 just hit the step-resume breakpoint at the start
4532 address of the function. Go back to single-stepping,
4533 which should take us back to the function call. */
4534 ecs->event_thread->stepping_over_breakpoint = 1;
4535 keep_going (ecs);
4536 return;
4537 }
4538 break;
4539
4540 case BPSTAT_WHAT_STOP_NOISY:
4541 if (debug_infrun)
4542 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4543 stop_print_frame = 1;
4544
4545 	  /* We are about to nuke the step_resume_breakpoint via the
4546 cleanup chain, so no need to worry about it here. */
4547
4548 stop_stepping (ecs);
4549 return;
4550
4551 case BPSTAT_WHAT_STOP_SILENT:
4552 if (debug_infrun)
4553 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4554 stop_print_frame = 0;
4555
4556 	  /* We are about to nuke the step_resume_breakpoint via the
4557 cleanup chain, so no need to worry about it here. */
4558
4559 stop_stepping (ecs);
4560 return;
4561
4562 case BPSTAT_WHAT_HP_STEP_RESUME:
4563 if (debug_infrun)
4564 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4565
4566 delete_step_resume_breakpoint (ecs->event_thread);
4567 if (ecs->event_thread->step_after_step_resume_breakpoint)
4568 {
4569 /* Back when the step-resume breakpoint was inserted, we
4570 were trying to single-step off a breakpoint. Go back
4571 to doing that. */
4572 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4573 ecs->event_thread->stepping_over_breakpoint = 1;
4574 keep_going (ecs);
4575 return;
4576 }
4577 break;
4578
4579 case BPSTAT_WHAT_KEEP_CHECKING:
4580 break;
4581 }
4582 }
4583
4584 /* We come here if we hit a breakpoint but should not
4585 stop for it. Possibly we also were stepping
4586 and should stop for that. So fall through and
4587 test for stepping. But, if not stepping,
4588 do not stop. */
4589
4590 /* In all-stop mode, if we're currently stepping but have stopped in
4591 some other thread, we need to switch back to the stepped thread. */
4592 if (!non_stop)
4593 {
4594 struct thread_info *tp;
4595
4596 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4597 ecs->event_thread);
4598 if (tp)
4599 {
4600 /* However, if the current thread is blocked on some internal
4601 breakpoint, and we simply need to step over that breakpoint
4602 to get it going again, do that first. */
4603 if ((ecs->event_thread->control.trap_expected
4604 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
4605 || ecs->event_thread->stepping_over_breakpoint)
4606 {
4607 keep_going (ecs);
4608 return;
4609 }
4610
4611 /* If the stepping thread exited, then don't try to switch
4612 back and resume it, which could fail in several different
4613 ways depending on the target. Instead, just keep going.
4614
4615 We can find a stepping dead thread in the thread list in
4616 two cases:
4617
4618 - The target supports thread exit events, and when the
4619 target tries to delete the thread from the thread list,
4620 inferior_ptid pointed at the exiting thread. In such
4621 case, calling delete_thread does not really remove the
4622 thread from the list; instead, the thread is left listed,
4623 with 'exited' state.
4624
4625 - The target's debug interface does not support thread
4626 exit events, and so we have no idea whatsoever if the
4627 previously stepping thread is still alive. For that
4628 reason, we need to synchronously query the target
4629 now. */
4630 if (is_exited (tp->ptid)
4631 || !target_thread_alive (tp->ptid))
4632 {
4633 if (debug_infrun)
4634 fprintf_unfiltered (gdb_stdlog,
4635 "infrun: not switching back to "
4636 "stepped thread, it has vanished\n");
4637
4638 delete_thread (tp->ptid);
4639 keep_going (ecs);
4640 return;
4641 }
4642
4643 /* Otherwise, we no longer expect a trap in the current thread.
4644 Clear the trap_expected flag before switching back -- this is
4645 what keep_going would do as well, if we called it. */
4646 ecs->event_thread->control.trap_expected = 0;
4647
4648 if (debug_infrun)
4649 fprintf_unfiltered (gdb_stdlog,
4650 "infrun: switching back to stepped thread\n");
4651
4652 ecs->event_thread = tp;
4653 ecs->ptid = tp->ptid;
4654 context_switch (ecs->ptid);
4655 keep_going (ecs);
4656 return;
4657 }
4658 }
4659
4660 if (ecs->event_thread->control.step_resume_breakpoint)
4661 {
4662 if (debug_infrun)
4663 fprintf_unfiltered (gdb_stdlog,
4664 "infrun: step-resume breakpoint is inserted\n");
4665
4666 /* Having a step-resume breakpoint overrides anything
4667 else having to do with stepping commands until
4668 that breakpoint is reached. */
4669 keep_going (ecs);
4670 return;
4671 }
4672
4673 if (ecs->event_thread->control.step_range_end == 0)
4674 {
4675 if (debug_infrun)
4676 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4677 /* Likewise if we aren't even stepping. */
4678 keep_going (ecs);
4679 return;
4680 }
4681
4682 /* Re-fetch current thread's frame in case the code above caused
4683 the frame cache to be re-initialized, making our FRAME variable
4684 a dangling pointer. */
4685 frame = get_current_frame ();
4686 gdbarch = get_frame_arch (frame);
4687 fill_in_stop_func (gdbarch, ecs);
4688
4689 /* If stepping through a line, keep going if still within it.
4690
4691 Note that step_range_end is the address of the first instruction
4692 beyond the step range, and NOT the address of the last instruction
4693 within it!
4694
4695 Note also that during reverse execution, we may be stepping
4696 through a function epilogue and therefore must detect when
4697 the current-frame changes in the middle of a line. */
4698
4699 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4700 && (execution_direction != EXEC_REVERSE
4701 || frame_id_eq (get_frame_id (frame),
4702 ecs->event_thread->control.step_frame_id)))
4703 {
4704 if (debug_infrun)
4705 fprintf_unfiltered
4706 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4707 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4708 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4709
4710 /* Tentatively re-enable range stepping; `resume' disables it if
4711 necessary (e.g., if we're stepping over a breakpoint or we
4712 have software watchpoints). */
4713 ecs->event_thread->control.may_range_step = 1;
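      /* Range stepping lets a capable target (e.g. a remote stub) step
	 through the whole [start, end) range itself instead of
	 reporting every single step back to GDB; assuming such a
	 target, the feature can also be toggled by the user with
	 "set range-stepping on|off".  */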
4714
4715 /* When stepping backward, stop at beginning of line range
4716 (unless it's the function entry point, in which case
4717 keep going back to the call point). */
4718 if (stop_pc == ecs->event_thread->control.step_range_start
4719 && stop_pc != ecs->stop_func_start
4720 && execution_direction == EXEC_REVERSE)
4721 {
4722 ecs->event_thread->control.stop_step = 1;
4723 print_end_stepping_range_reason ();
4724 stop_stepping (ecs);
4725 }
4726 else
4727 keep_going (ecs);
4728
4729 return;
4730 }
4731
4732 /* We stepped out of the stepping range. */
4733
4734 /* If we are stepping at the source level and entered the runtime
4735 loader dynamic symbol resolution code...
4736
4737 EXEC_FORWARD: we keep on single stepping until we exit the run
4738 time loader code and reach the callee's address.
4739
4740 EXEC_REVERSE: we've already executed the callee (backward), and
4741 the runtime loader code is handled just like any other
4742 undebuggable function call. Now we need only keep stepping
4743 backward through the trampoline code, and that's handled further
4744 down, so there is nothing for us to do here. */
4745
4746 if (execution_direction != EXEC_REVERSE
4747 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4748 && in_solib_dynsym_resolve_code (stop_pc))
4749 {
4750 CORE_ADDR pc_after_resolver =
4751 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4752
4753 if (debug_infrun)
4754 fprintf_unfiltered (gdb_stdlog,
4755 "infrun: stepped into dynsym resolve code\n");
4756
4757 if (pc_after_resolver)
4758 {
4759 /* Set up a step-resume breakpoint at the address
4760 indicated by SKIP_SOLIB_RESOLVER. */
4761 struct symtab_and_line sr_sal;
4762
4763 init_sal (&sr_sal);
4764 sr_sal.pc = pc_after_resolver;
4765 sr_sal.pspace = get_frame_program_space (frame);
4766
4767 insert_step_resume_breakpoint_at_sal (gdbarch,
4768 sr_sal, null_frame_id);
4769 }
4770
4771 keep_going (ecs);
4772 return;
4773 }
4774
4775 if (ecs->event_thread->control.step_range_end != 1
4776 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4777 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4778 && get_frame_type (frame) == SIGTRAMP_FRAME)
4779 {
4780 if (debug_infrun)
4781 fprintf_unfiltered (gdb_stdlog,
4782 "infrun: stepped into signal trampoline\n");
4783 /* The inferior, while doing a "step" or "next", has ended up in
4784 a signal trampoline (either by a signal being delivered or by
4785 the signal handler returning). Just single-step until the
4786 inferior leaves the trampoline (either by calling the handler
4787 or returning). */
4788 keep_going (ecs);
4789 return;
4790 }
4791
4792 /* If we're in the return path from a shared library trampoline,
4793 we want to proceed through the trampoline when stepping. */
4794 /* macro/2012-04-25: This needs to come before the subroutine
4795 call check below as on some targets return trampolines look
4796 like subroutine calls (MIPS16 return thunks). */
4797 if (gdbarch_in_solib_return_trampoline (gdbarch,
4798 stop_pc, ecs->stop_func_name)
4799 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4800 {
4801 /* Determine where this trampoline returns. */
4802 CORE_ADDR real_stop_pc;
4803
4804 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4805
4806 if (debug_infrun)
4807 fprintf_unfiltered (gdb_stdlog,
4808 "infrun: stepped into solib return tramp\n");
4809
4810 /* Only proceed through if we know where it's going. */
4811 if (real_stop_pc)
4812 {
4813 /* And put the step-breakpoint there and go until there. */
4814 struct symtab_and_line sr_sal;
4815
4816 init_sal (&sr_sal); /* initialize to zeroes */
4817 sr_sal.pc = real_stop_pc;
4818 sr_sal.section = find_pc_overlay (sr_sal.pc);
4819 sr_sal.pspace = get_frame_program_space (frame);
4820
4821 /* Do not specify what the fp should be when we stop since
4822 on some machines the prologue is where the new fp value
4823 is established. */
4824 insert_step_resume_breakpoint_at_sal (gdbarch,
4825 sr_sal, null_frame_id);
4826
4827 /* Restart without fiddling with the step ranges or
4828 other state. */
4829 keep_going (ecs);
4830 return;
4831 }
4832 }
4833
4834 /* Check for subroutine calls. The check for the current frame
4835 equalling the step ID is not necessary - the check of the
4836 previous frame's ID is sufficient - but it is a common case and
4837 cheaper than checking the previous frame's ID.
4838
4839 NOTE: frame_id_eq will never report two invalid frame IDs as
4840 being equal, so to get into this block, both the current and
4841 previous frame must have valid frame IDs. */
4842 /* The outer_frame_id check is a heuristic to detect stepping
4843 through startup code. If we step over an instruction which
4844 sets the stack pointer from an invalid value to a valid value,
4845 we may detect that as a subroutine call from the mythical
4846 "outermost" function. This could be fixed by marking
4847 outermost frames as !stack_p,code_p,special_p. Then the
4848 initial outermost frame, before sp was valid, would
4849 have code_addr == &_start. See the comment in frame_id_eq
4850 for more. */
4851 if (!frame_id_eq (get_stack_frame_id (frame),
4852 ecs->event_thread->control.step_stack_frame_id)
4853 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4854 ecs->event_thread->control.step_stack_frame_id)
4855 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4856 outer_frame_id)
4857 || step_start_function != find_pc_function (stop_pc))))
4858 {
4859 CORE_ADDR real_stop_pc;
4860
4861 if (debug_infrun)
4862 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4863
4864 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4865 || ((ecs->event_thread->control.step_range_end == 1)
4866 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4867 ecs->stop_func_start)))
4868 {
4869 /* I presume that step_over_calls is only 0 when we're
4870 supposed to be stepping at the assembly language level
4871 ("stepi"). Just stop. */
4872 	  /* Also, maybe we just did a "nexti" inside a prologue, so we
4873 thought it was a subroutine call but it was not. Stop as
4874 well. FENN */
4875 /* And this works the same backward as frontward. MVS */
4876 ecs->event_thread->control.stop_step = 1;
4877 print_end_stepping_range_reason ();
4878 stop_stepping (ecs);
4879 return;
4880 }
4881
4882 /* Reverse stepping through solib trampolines. */
4883
4884 if (execution_direction == EXEC_REVERSE
4885 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4886 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4887 || (ecs->stop_func_start == 0
4888 && in_solib_dynsym_resolve_code (stop_pc))))
4889 {
4890 /* Any solib trampoline code can be handled in reverse
4891 by simply continuing to single-step. We have already
4892 executed the solib function (backwards), and a few
4893 steps will take us back through the trampoline to the
4894 caller. */
4895 keep_going (ecs);
4896 return;
4897 }
4898
4899 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4900 {
4901 /* We're doing a "next".
4902
4903 Normal (forward) execution: set a breakpoint at the
4904 callee's return address (the address at which the caller
4905 will resume).
4906
4907 Reverse (backward) execution: set the step-resume
4908 breakpoint at the start of the function that we just
4909 stepped into (backwards), and continue to there. When we
4910 get there, we'll need to single-step back to the caller. */
4911
4912 if (execution_direction == EXEC_REVERSE)
4913 {
4914 /* If we're already at the start of the function, we've either
4915 just stepped backward into a single instruction function,
4916 or stepped back out of a signal handler to the first instruction
4917 of the function. Just keep going, which will single-step back
4918 to the caller. */
4919 if (ecs->stop_func_start != stop_pc)
4920 {
4921 struct symtab_and_line sr_sal;
4922
4923 /* Normal function call return (static or dynamic). */
4924 init_sal (&sr_sal);
4925 sr_sal.pc = ecs->stop_func_start;
4926 sr_sal.pspace = get_frame_program_space (frame);
4927 insert_step_resume_breakpoint_at_sal (gdbarch,
4928 sr_sal, null_frame_id);
4929 }
4930 }
4931 else
4932 insert_step_resume_breakpoint_at_caller (frame);
4933
4934 keep_going (ecs);
4935 return;
4936 }
4937
4938 /* If we are in a function call trampoline (a stub between the
4939 calling routine and the real function), locate the real
4940 function. That's what tells us (a) whether we want to step
4941 into it at all, and (b) what prologue we want to run to the
4942 end of, if we do step into it. */
4943 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4944 if (real_stop_pc == 0)
4945 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4946 if (real_stop_pc != 0)
4947 ecs->stop_func_start = real_stop_pc;
4948
4949 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4950 {
4951 struct symtab_and_line sr_sal;
4952
4953 init_sal (&sr_sal);
4954 sr_sal.pc = ecs->stop_func_start;
4955 sr_sal.pspace = get_frame_program_space (frame);
4956
4957 insert_step_resume_breakpoint_at_sal (gdbarch,
4958 sr_sal, null_frame_id);
4959 keep_going (ecs);
4960 return;
4961 }
4962
4963 /* If we have line number information for the function we are
4964 thinking of stepping into and the function isn't on the skip
4965 list, step into it.
4966
4967 If there are several symtabs at that PC (e.g. with include
4968 files), we just want to know whether *any* of them have line
4969 numbers. find_pc_line handles this. */
4970 {
4971 struct symtab_and_line tmp_sal;
4972
4973 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4974 if (tmp_sal.line != 0
4975 && !function_name_is_marked_for_skip (ecs->stop_func_name,
4976 &tmp_sal))
4977 {
4978 if (execution_direction == EXEC_REVERSE)
4979 handle_step_into_function_backward (gdbarch, ecs);
4980 else
4981 handle_step_into_function (gdbarch, ecs);
4982 return;
4983 }
4984 }
4985
4986 /* If we have no line number and the step-stop-if-no-debug is
4987 set, we stop the step so that the user has a chance to switch
4988 to assembly mode. */
4989 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4990 && step_stop_if_no_debug)
4991 {
4992 ecs->event_thread->control.stop_step = 1;
4993 print_end_stepping_range_reason ();
4994 stop_stepping (ecs);
4995 return;
4996 }
4997
4998 if (execution_direction == EXEC_REVERSE)
4999 {
5000 /* If we're already at the start of the function, we've either just
5001 stepped backward into a single instruction function without line
5002 number info, or stepped back out of a signal handler to the first
5003 instruction of the function without line number info. Just keep
5004 going, which will single-step back to the caller. */
5005 if (ecs->stop_func_start != stop_pc)
5006 {
5007 /* Set a breakpoint at callee's start address.
5008 From there we can step once and be back in the caller. */
5009 struct symtab_and_line sr_sal;
5010
5011 init_sal (&sr_sal);
5012 sr_sal.pc = ecs->stop_func_start;
5013 sr_sal.pspace = get_frame_program_space (frame);
5014 insert_step_resume_breakpoint_at_sal (gdbarch,
5015 sr_sal, null_frame_id);
5016 }
5017 }
5018 else
5019 /* Set a breakpoint at callee's return address (the address
5020 at which the caller will resume). */
5021 insert_step_resume_breakpoint_at_caller (frame);
5022
5023 keep_going (ecs);
5024 return;
5025 }
5026
5027 /* Reverse stepping through solib trampolines. */
5028
5029 if (execution_direction == EXEC_REVERSE
5030 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5031 {
5032 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5033 || (ecs->stop_func_start == 0
5034 && in_solib_dynsym_resolve_code (stop_pc)))
5035 {
5036 /* Any solib trampoline code can be handled in reverse
5037 by simply continuing to single-step. We have already
5038 executed the solib function (backwards), and a few
5039 steps will take us back through the trampoline to the
5040 caller. */
5041 keep_going (ecs);
5042 return;
5043 }
5044 else if (in_solib_dynsym_resolve_code (stop_pc))
5045 {
5046 /* Stepped backward into the solib dynsym resolver.
5047 Set a breakpoint at its start and continue, then
5048 one more step will take us out. */
5049 struct symtab_and_line sr_sal;
5050
5051 init_sal (&sr_sal);
5052 sr_sal.pc = ecs->stop_func_start;
5053 sr_sal.pspace = get_frame_program_space (frame);
5054 insert_step_resume_breakpoint_at_sal (gdbarch,
5055 sr_sal, null_frame_id);
5056 keep_going (ecs);
5057 return;
5058 }
5059 }
5060
5061 stop_pc_sal = find_pc_line (stop_pc, 0);
5062
5063 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5064 the trampoline processing logic, however, there are some trampolines
5065 that have no names, so we should do trampoline handling first. */
5066 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5067 && ecs->stop_func_name == NULL
5068 && stop_pc_sal.line == 0)
5069 {
5070 if (debug_infrun)
5071 fprintf_unfiltered (gdb_stdlog,
5072 "infrun: stepped into undebuggable function\n");
5073
5074 /* The inferior just stepped into, or returned to, an
5075 undebuggable function (where there is no debugging information
5076 and no line number corresponding to the address where the
5077 inferior stopped). Since we want to skip this kind of code,
5078 we keep going until the inferior returns from this
5079 function - unless the user has asked us not to (via
5080 set step-mode) or we no longer know how to get back
5081 to the call site. */
5082 if (step_stop_if_no_debug
5083 || !frame_id_p (frame_unwind_caller_id (frame)))
5084 {
5085 /* If we have no line number and the step-stop-if-no-debug
5086 is set, we stop the step so that the user has a chance to
5087 switch to assembly mode. */
5088 ecs->event_thread->control.stop_step = 1;
5089 print_end_stepping_range_reason ();
5090 stop_stepping (ecs);
5091 return;
5092 }
5093 else
5094 {
5095 /* Set a breakpoint at callee's return address (the address
5096 at which the caller will resume). */
5097 insert_step_resume_breakpoint_at_caller (frame);
5098 keep_going (ecs);
5099 return;
5100 }
5101 }
5102
5103 if (ecs->event_thread->control.step_range_end == 1)
5104 {
5105 /* It is stepi or nexti. We always want to stop stepping after
5106 one instruction. */
5107 if (debug_infrun)
5108 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5109 ecs->event_thread->control.stop_step = 1;
5110 print_end_stepping_range_reason ();
5111 stop_stepping (ecs);
5112 return;
5113 }
5114
5115 if (stop_pc_sal.line == 0)
5116 {
5117 /* We have no line number information. That means to stop
5118 stepping (does this always happen right after one instruction,
5119 when we do "s" in a function with no line numbers,
5120 or can this happen as a result of a return or longjmp?). */
5121 if (debug_infrun)
5122 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5123 ecs->event_thread->control.stop_step = 1;
5124 print_end_stepping_range_reason ();
5125 stop_stepping (ecs);
5126 return;
5127 }
5128
5129 /* Look for "calls" to inlined functions, part one. If the inline
5130 frame machinery detected some skipped call sites, we have entered
5131 a new inline function. */
5132
5133 if (frame_id_eq (get_frame_id (get_current_frame ()),
5134 ecs->event_thread->control.step_frame_id)
5135 && inline_skipped_frames (ecs->ptid))
5136 {
5137 struct symtab_and_line call_sal;
5138
5139 if (debug_infrun)
5140 fprintf_unfiltered (gdb_stdlog,
5141 "infrun: stepped into inlined function\n");
5142
5143 find_frame_sal (get_current_frame (), &call_sal);
5144
5145 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5146 {
5147 /* For "step", we're going to stop. But if the call site
5148 for this inlined function is on the same source line as
5149 we were previously stepping, go down into the function
5150 first. Otherwise stop at the call site. */
5151
5152 if (call_sal.line == ecs->event_thread->current_line
5153 && call_sal.symtab == ecs->event_thread->current_symtab)
5154 step_into_inline_frame (ecs->ptid);
5155
5156 ecs->event_thread->control.stop_step = 1;
5157 print_end_stepping_range_reason ();
5158 stop_stepping (ecs);
5159 return;
5160 }
5161 else
5162 {
5163 /* For "next", we should stop at the call site if it is on a
5164 different source line. Otherwise continue through the
5165 inlined function. */
5166 if (call_sal.line == ecs->event_thread->current_line
5167 && call_sal.symtab == ecs->event_thread->current_symtab)
5168 keep_going (ecs);
5169 else
5170 {
5171 ecs->event_thread->control.stop_step = 1;
5172 print_end_stepping_range_reason ();
5173 stop_stepping (ecs);
5174 }
5175 return;
5176 }
5177 }
5178
5179 /* Look for "calls" to inlined functions, part two. If we are still
5180 in the same real function we were stepping through, but we have
5181 to go further up to find the exact frame ID, we are stepping
5182 through a more inlined call beyond its call site. */
5183
5184 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5185 && !frame_id_eq (get_frame_id (get_current_frame ()),
5186 ecs->event_thread->control.step_frame_id)
5187 && stepped_in_from (get_current_frame (),
5188 ecs->event_thread->control.step_frame_id))
5189 {
5190 if (debug_infrun)
5191 fprintf_unfiltered (gdb_stdlog,
5192 "infrun: stepping through inlined function\n");
5193
5194 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5195 keep_going (ecs);
5196 else
5197 {
5198 ecs->event_thread->control.stop_step = 1;
5199 print_end_stepping_range_reason ();
5200 stop_stepping (ecs);
5201 }
5202 return;
5203 }
5204
5205 if ((stop_pc == stop_pc_sal.pc)
5206 && (ecs->event_thread->current_line != stop_pc_sal.line
5207 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5208 {
5209 /* We are at the start of a different line. So stop. Note that
5210 we don't stop if we step into the middle of a different line.
5211 That is said to make things like for (;;) statements work
5212 better. */
5213 if (debug_infrun)
5214 fprintf_unfiltered (gdb_stdlog,
5215 "infrun: stepped to a different line\n");
5216 ecs->event_thread->control.stop_step = 1;
5217 print_end_stepping_range_reason ();
5218 stop_stepping (ecs);
5219 return;
5220 }
5221
5222 /* We aren't done stepping.
5223
5224 Optimize by setting the stepping range to the line.
5225 (We might not be in the original line, but if we entered a
5226 new line in mid-statement, we continue stepping. This makes
5227 things like for(;;) statements work better.) */
5228
5229 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5230 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5231 ecs->event_thread->control.may_range_step = 1;
5232 set_step_info (frame, stop_pc_sal);
5233
5234 if (debug_infrun)
5235 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5236 keep_going (ecs);
5237 }
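
/* Illustrative sketch (not compiled): the step range recorded just
   above is the half-open PC interval covering the current source
   line.  A test of roughly this shape, earlier in the stepping logic,
   is what keeps the inferior running while the PC stays inside it:

     if (stop_pc >= tp->control.step_range_start
         && stop_pc < tp->control.step_range_end)
       {
         ... still within the line being stepped; keep going ...
       }

   where TP stands for the stepping thread.  */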
5238
5239 /* Is thread TP in the middle of single-stepping? */
5240
5241 static int
5242 currently_stepping (struct thread_info *tp)
5243 {
5244 return ((tp->control.step_range_end
5245 && tp->control.step_resume_breakpoint == NULL)
5246 || tp->control.trap_expected
5247 || bpstat_should_step ());
5248 }
5249
5250 /* Returns true if any thread *but* the one passed in "data" is in the
5251 middle of stepping or of handling a "next". */
5252
5253 static int
5254 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5255 {
5256 if (tp == data)
5257 return 0;
5258
5259 return (tp->control.step_range_end
5260 || tp->control.trap_expected);
5261 }
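
/* Illustrative sketch (not compiled): a caller that wants to know
   whether any thread other than the event thread is stepping or
   nexting can walk the thread list with the callback above.  The use
   of iterate_over_threads here is an assumption made for the example;
   the real call sites are elsewhere in this file:

     if (iterate_over_threads (currently_stepping_or_nexting_callback,
                               ecs->event_thread) != NULL)
       {
         ... some other thread is in the middle of a step/next ...
       }
*/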
5262
5263 /* Inferior has stepped into a subroutine call with source code that
5264 we should not step over. Step to the first line of code in
5265 it. */
5266
5267 static void
5268 handle_step_into_function (struct gdbarch *gdbarch,
5269 struct execution_control_state *ecs)
5270 {
5271 struct symtab *s;
5272 struct symtab_and_line stop_func_sal, sr_sal;
5273
5274 fill_in_stop_func (gdbarch, ecs);
5275
5276 s = find_pc_symtab (stop_pc);
5277 if (s && s->language != language_asm)
5278 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5279 ecs->stop_func_start);
5280
5281 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5282 /* Use the step_resume_break to step until the end of the prologue,
5283 even if that involves jumps (as it seems to on the vax under
5284 4.2). */
5285 /* If the prologue ends in the middle of a source line, continue to
5286 the end of that source line (if it is still within the function).
5287 Otherwise, just go to end of prologue. */
5288 if (stop_func_sal.end
5289 && stop_func_sal.pc != ecs->stop_func_start
5290 && stop_func_sal.end < ecs->stop_func_end)
5291 ecs->stop_func_start = stop_func_sal.end;
5292
5293 /* Architectures which require breakpoint adjustment might not be able
5294 to place a breakpoint at the computed address. If so, the test
5295 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5296 ecs->stop_func_start to an address at which a breakpoint may be
5297 legitimately placed.
5298
5299 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5300 made, GDB will enter an infinite loop when stepping through
5301 optimized code consisting of VLIW instructions which contain
5302 subinstructions corresponding to different source lines. On
5303 FR-V, it's not permitted to place a breakpoint on any but the
5304 first subinstruction of a VLIW instruction. When a breakpoint is
5305 set, GDB will adjust the breakpoint address to the beginning of
5306 the VLIW instruction. Thus, we need to make the corresponding
5307 adjustment here when computing the stop address. */
5308
5309 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5310 {
5311 ecs->stop_func_start
5312 = gdbarch_adjust_breakpoint_address (gdbarch,
5313 ecs->stop_func_start);
5314 }
5315
5316 if (ecs->stop_func_start == stop_pc)
5317 {
5318 /* We are already there: stop now. */
5319 ecs->event_thread->control.stop_step = 1;
5320 print_end_stepping_range_reason ();
5321 stop_stepping (ecs);
5322 return;
5323 }
5324 else
5325 {
5326 /* Put the step-breakpoint there and go until there. */
5327 init_sal (&sr_sal); /* initialize to zeroes */
5328 sr_sal.pc = ecs->stop_func_start;
5329 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5330 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5331
5332 /* Do not specify what the fp should be when we stop since on
5333 some machines the prologue is where the new fp value is
5334 established. */
5335 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5336
5337 /* And make sure stepping stops right away then. */
5338 ecs->event_thread->control.step_range_end
5339 = ecs->event_thread->control.step_range_start;
5340 }
5341 keep_going (ecs);
5342 }
5343
5344 /* Inferior has stepped backward into a subroutine call with source
5345 code that we should not step over. Step to the beginning of the
5346 last line of code in it. */
5347
5348 static void
5349 handle_step_into_function_backward (struct gdbarch *gdbarch,
5350 struct execution_control_state *ecs)
5351 {
5352 struct symtab *s;
5353 struct symtab_and_line stop_func_sal;
5354
5355 fill_in_stop_func (gdbarch, ecs);
5356
5357 s = find_pc_symtab (stop_pc);
5358 if (s && s->language != language_asm)
5359 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5360 ecs->stop_func_start);
5361
5362 stop_func_sal = find_pc_line (stop_pc, 0);
5363
5364 /* OK, we're just going to keep stepping here. */
5365 if (stop_func_sal.pc == stop_pc)
5366 {
5367 /* We're there already. Just stop stepping now. */
5368 ecs->event_thread->control.stop_step = 1;
5369 print_end_stepping_range_reason ();
5370 stop_stepping (ecs);
5371 }
5372 else
5373 {
5374 /* Else just reset the step range and keep going.
5375 No step-resume breakpoint, they don't work for
5376 epilogues, which can have multiple entry paths. */
5377 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5378 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5379 keep_going (ecs);
5380 }
5381 return;
5382 }
5383
5384 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5385 This is used both to skip over functions and to skip over code. */
5386
5387 static void
5388 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5389 struct symtab_and_line sr_sal,
5390 struct frame_id sr_id,
5391 enum bptype sr_type)
5392 {
5393 /* There should never be more than one step-resume or longjmp-resume
5394 breakpoint per thread, so we should never be setting a new
5395 step_resume_breakpoint when one is already active. */
5396 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5397 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5398
5399 if (debug_infrun)
5400 fprintf_unfiltered (gdb_stdlog,
5401 "infrun: inserting step-resume breakpoint at %s\n",
5402 paddress (gdbarch, sr_sal.pc));
5403
5404 inferior_thread ()->control.step_resume_breakpoint
5405 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5406 }
5407
5408 void
5409 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5410 struct symtab_and_line sr_sal,
5411 struct frame_id sr_id)
5412 {
5413 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5414 sr_sal, sr_id,
5415 bp_step_resume);
5416 }
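
/* Illustrative sketch (not compiled): the typical caller pattern for
   insert_step_resume_breakpoint_at_sal, as used at several places
   earlier in this file -- build a symtab_and_line describing where
   execution should resume, insert the momentary breakpoint there, and
   keep going:

     struct symtab_and_line sr_sal;

     init_sal (&sr_sal);
     sr_sal.pc = real_stop_pc;
     sr_sal.section = find_pc_overlay (sr_sal.pc);
     sr_sal.pspace = get_frame_program_space (frame);
     insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
     keep_going (ecs);

   Here REAL_STOP_PC, FRAME, GDBARCH and ECS stand for the caller's
   local state; null_frame_id avoids constraining which frame must be
   current when the breakpoint is hit, since prologues may still be
   setting up the frame pointer.  */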
5417
5418 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5419 This is used to skip a potential signal handler.
5420
5421 This is called with the interrupted function's frame. The signal
5422 handler, when it returns, will resume the interrupted function at
5423 RETURN_FRAME.pc. */
5424
5425 static void
5426 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5427 {
5428 struct symtab_and_line sr_sal;
5429 struct gdbarch *gdbarch;
5430
5431 gdb_assert (return_frame != NULL);
5432 init_sal (&sr_sal); /* initialize to zeros */
5433
5434 gdbarch = get_frame_arch (return_frame);
5435 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5436 sr_sal.section = find_pc_overlay (sr_sal.pc);
5437 sr_sal.pspace = get_frame_program_space (return_frame);
5438
5439 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5440 get_stack_frame_id (return_frame),
5441 bp_hp_step_resume);
5442 }
5443
5444 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5445 is used to skip a function after stepping into it (for "next" or if
5446 the called function has no debugging information).
5447
5448 The current function has almost always been reached by single
5449 stepping a call or return instruction. NEXT_FRAME belongs to the
5450 current function, and the breakpoint will be set at the caller's
5451 resume address.
5452
5453 This is a separate function rather than reusing
5454 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5455 get_prev_frame, which may stop prematurely (see the implementation
5456 of frame_unwind_caller_id for an example). */
5457
5458 static void
5459 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5460 {
5461 struct symtab_and_line sr_sal;
5462 struct gdbarch *gdbarch;
5463
5464 /* We shouldn't have gotten here if we don't know where the call site
5465 is. */
5466 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5467
5468 init_sal (&sr_sal); /* initialize to zeros */
5469
5470 gdbarch = frame_unwind_caller_arch (next_frame);
5471 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5472 frame_unwind_caller_pc (next_frame));
5473 sr_sal.section = find_pc_overlay (sr_sal.pc);
5474 sr_sal.pspace = frame_unwind_program_space (next_frame);
5475
5476 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5477 frame_unwind_caller_id (next_frame));
5478 }
5479
5480 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5481 new breakpoint at the target of a jmp_buf. The handling of
5482 longjmp-resume uses the same mechanisms used for handling
5483 "step-resume" breakpoints. */
5484
5485 static void
5486 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5487 {
5488 /* There should never be more than one longjmp-resume breakpoint per
5489 thread, so we should never be setting a new
5490 longjmp_resume_breakpoint when one is already active. */
5491 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
5492
5493 if (debug_infrun)
5494 fprintf_unfiltered (gdb_stdlog,
5495 "infrun: inserting longjmp-resume breakpoint at %s\n",
5496 paddress (gdbarch, pc));
5497
5498 inferior_thread ()->control.exception_resume_breakpoint =
5499 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5500 }
5501
5502 /* Insert an exception resume breakpoint. TP is the thread throwing
5503 the exception. The block B is the block of the unwinder debug hook
5504 function. FRAME is the frame corresponding to the call to this
5505 function. SYM is the symbol of the function argument holding the
5506 target PC of the exception. */
5507
5508 static void
5509 insert_exception_resume_breakpoint (struct thread_info *tp,
5510 struct block *b,
5511 struct frame_info *frame,
5512 struct symbol *sym)
5513 {
5514 volatile struct gdb_exception e;
5515
5516 /* We want to ignore errors here. */
5517 TRY_CATCH (e, RETURN_MASK_ERROR)
5518 {
5519 struct symbol *vsym;
5520 struct value *value;
5521 CORE_ADDR handler;
5522 struct breakpoint *bp;
5523
5524 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5525 value = read_var_value (vsym, frame);
5526 /* If the value was optimized out, revert to the old behavior. */
5527 if (! value_optimized_out (value))
5528 {
5529 handler = value_as_address (value);
5530
5531 if (debug_infrun)
5532 fprintf_unfiltered (gdb_stdlog,
5533 "infrun: exception resume at %lx\n",
5534 (unsigned long) handler);
5535
5536 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5537 handler, bp_exception_resume);
5538
5539 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5540 frame = NULL;
5541
5542 bp->thread = tp->num;
5543 inferior_thread ()->control.exception_resume_breakpoint = bp;
5544 }
5545 }
5546 }
5547
5548 /* A helper for check_exception_resume that sets an
5549 exception-breakpoint based on a SystemTap probe. */
5550
5551 static void
5552 insert_exception_resume_from_probe (struct thread_info *tp,
5553 const struct probe *probe,
5554 struct frame_info *frame)
5555 {
5556 struct value *arg_value;
5557 CORE_ADDR handler;
5558 struct breakpoint *bp;
5559
5560 arg_value = probe_safe_evaluate_at_pc (frame, 1);
5561 if (!arg_value)
5562 return;
5563
5564 handler = value_as_address (arg_value);
5565
5566 if (debug_infrun)
5567 fprintf_unfiltered (gdb_stdlog,
5568 "infrun: exception resume at %s\n",
5569 paddress (get_objfile_arch (probe->objfile),
5570 handler));
5571
5572 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5573 handler, bp_exception_resume);
5574 bp->thread = tp->num;
5575 inferior_thread ()->control.exception_resume_breakpoint = bp;
5576 }
5577
5578 /* This is called when an exception has been intercepted. Check to
5579 see whether the exception's destination is of interest, and if so,
5580 set an exception resume breakpoint there. */
5581
5582 static void
5583 check_exception_resume (struct execution_control_state *ecs,
5584 struct frame_info *frame)
5585 {
5586 volatile struct gdb_exception e;
5587 const struct probe *probe;
5588 struct symbol *func;
5589
5590 /* First see if this exception unwinding breakpoint was set via a
5591 SystemTap probe point. If so, the probe has two arguments: the
5592 CFA and the HANDLER. We ignore the CFA, extract the handler, and
5593 set a breakpoint there. */
5594 probe = find_probe_by_pc (get_frame_pc (frame));
5595 if (probe)
5596 {
5597 insert_exception_resume_from_probe (ecs->event_thread, probe, frame);
5598 return;
5599 }
5600
5601 func = get_frame_function (frame);
5602 if (!func)
5603 return;
5604
5605 TRY_CATCH (e, RETURN_MASK_ERROR)
5606 {
5607 struct block *b;
5608 struct block_iterator iter;
5609 struct symbol *sym;
5610 int argno = 0;
5611
5612 /* The exception breakpoint is a thread-specific breakpoint on
5613 the unwinder's debug hook, declared as:
5614
5615 void _Unwind_DebugHook (void *cfa, void *handler);
5616
5617 The CFA argument indicates the frame to which control is
5618 about to be transferred. HANDLER is the destination PC.
5619
5620 We ignore the CFA and set a temporary breakpoint at HANDLER.
5621 This is not extremely efficient but it avoids issues in gdb
5622 with computing the DWARF CFA, and it also works even in weird
5623 cases such as throwing an exception from inside a signal
5624 handler. */
5625
5626 b = SYMBOL_BLOCK_VALUE (func);
5627 ALL_BLOCK_SYMBOLS (b, iter, sym)
5628 {
5629 if (!SYMBOL_IS_ARGUMENT (sym))
5630 continue;
5631
5632 if (argno == 0)
5633 ++argno;
5634 else
5635 {
5636 insert_exception_resume_breakpoint (ecs->event_thread,
5637 b, frame, sym);
5638 break;
5639 }
5640 }
5641 }
5642 }
5643
5644 static void
5645 stop_stepping (struct execution_control_state *ecs)
5646 {
5647 if (debug_infrun)
5648 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5649
5650 /* Let callers know we don't want to wait for the inferior anymore. */
5651 ecs->wait_some_more = 0;
5652 }
5653
5654 /* This function handles various cases where we need to continue
5655 waiting for the inferior. */
5656 /* (Used to be the keep_going: label in the old wait_for_inferior). */
5657
5658 static void
5659 keep_going (struct execution_control_state *ecs)
5660 {
5661 /* Make sure normal_stop is called if we get a QUIT handled before
5662 reaching resume. */
5663 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5664
5665 /* Save the pc before execution, to compare with pc after stop. */
5666 ecs->event_thread->prev_pc
5667 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5668
5669 /* If we did not do break;, it means we should keep running the
5670 inferior and not return to the debugger. */
5671
5672 if (ecs->event_thread->control.trap_expected
5673 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5674 {
5675 /* We took a signal (which we are supposed to pass through to
5676 the inferior, else we'd not get here) and we haven't yet
5677 gotten our trap. Simply continue. */
5678
5679 discard_cleanups (old_cleanups);
5680 resume (currently_stepping (ecs->event_thread),
5681 ecs->event_thread->suspend.stop_signal);
5682 }
5683 else
5684 {
5685 /* Either the trap was not expected, but we are continuing
5686 anyway (the user asked that this signal be passed to the
5687 child)
5688 -- or --
5689 The signal was SIGTRAP, e.g. it was our signal, but we
5690 decided we should resume from it.
5691
5692 We're going to run this baby now!
5693
5694 Note that insert_breakpoints won't try to re-insert
5695 already inserted breakpoints. Therefore, we don't
5696 care if breakpoints were already inserted, or not. */
5697
5698 if (ecs->event_thread->stepping_over_breakpoint)
5699 {
5700 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5701
5702 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5703 /* Since we can't do a displaced step, we have to remove
5704 the breakpoint while we step it. To keep things
5705 simple, we remove them all. */
5706 remove_breakpoints ();
5707 }
5708 else
5709 {
5710 volatile struct gdb_exception e;
5711
5712 /* Stop stepping when inserting breakpoints
5713 has failed. */
5714 TRY_CATCH (e, RETURN_MASK_ERROR)
5715 {
5716 insert_breakpoints ();
5717 }
5718 if (e.reason < 0)
5719 {
5720 exception_print (gdb_stderr, e);
5721 stop_stepping (ecs);
5722 return;
5723 }
5724 }
5725
5726 ecs->event_thread->control.trap_expected
5727 = ecs->event_thread->stepping_over_breakpoint;
5728
5729 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5730 specifies that such a signal should be delivered to the
5731 target program).
5732
5733 Typically, this would occur when a user is debugging a
5734 target monitor on a simulator: the target monitor sets a
5735 breakpoint; the simulator encounters this breakpoint and
5736 halts the simulation, handing control to GDB; GDB, noting
5737 that the breakpoint isn't valid, returns control back to the
5738 simulator; the simulator then delivers the hardware
5739 equivalent of a SIGNAL_TRAP to the program being debugged. */
5740
5741 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
5742 && !signal_program[ecs->event_thread->suspend.stop_signal])
5743 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5744
5745 discard_cleanups (old_cleanups);
5746 resume (currently_stepping (ecs->event_thread),
5747 ecs->event_thread->suspend.stop_signal);
5748 }
5749
5750 prepare_to_wait (ecs);
5751 }
5752
5753 /* This function normally comes after a resume, before
5754 handle_inferior_event exits. It takes care of any last bits of
5755 housekeeping, and sets the all-important wait_some_more flag. */
5756
5757 static void
5758 prepare_to_wait (struct execution_control_state *ecs)
5759 {
5760 if (debug_infrun)
5761 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5762
5763 /* This is the old end of the while loop. Let everybody know we
5764 want to wait for the inferior some more and get called again
5765 soon. */
5766 ecs->wait_some_more = 1;
5767 }
5768
5769 /* Several print_*_reason functions to print why the inferior has stopped.
5770 We always print something when the inferior exits, or receives a signal.
5771 The rest of the cases are dealt with later on in normal_stop and
5772 print_it_typical. Ideally there should be a call to one of these
5773 print_*_reason functions from handle_inferior_event each time
5774 stop_stepping is called. */
5775
5776 /* Print why the inferior has stopped.
5777 We are done with a step/next/si/ni command. On the CLI this prints
5778 nothing; in MI mode it emits the stop reason, but only if we are not
5779 in the middle of doing a "step n" operation for n > 1. */
5780
5781 static void
5782 print_end_stepping_range_reason (void)
5783 {
5784 if ((!inferior_thread ()->step_multi
5785 || !inferior_thread ()->control.stop_step)
5786 && ui_out_is_mi_like_p (current_uiout))
5787 ui_out_field_string (current_uiout, "reason",
5788 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5789 }
5790
5791 /* The inferior was terminated by a signal, print why it stopped. */
5792
5793 static void
5794 print_signal_exited_reason (enum gdb_signal siggnal)
5795 {
5796 struct ui_out *uiout = current_uiout;
5797
5798 annotate_signalled ();
5799 if (ui_out_is_mi_like_p (uiout))
5800 ui_out_field_string
5801 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5802 ui_out_text (uiout, "\nProgram terminated with signal ");
5803 annotate_signal_name ();
5804 ui_out_field_string (uiout, "signal-name",
5805 gdb_signal_to_name (siggnal));
5806 annotate_signal_name_end ();
5807 ui_out_text (uiout, ", ");
5808 annotate_signal_string ();
5809 ui_out_field_string (uiout, "signal-meaning",
5810 gdb_signal_to_string (siggnal));
5811 annotate_signal_string_end ();
5812 ui_out_text (uiout, ".\n");
5813 ui_out_text (uiout, "The program no longer exists.\n");
5814 }
5815
5816 /* The inferior program is finished, print why it stopped. */
5817
5818 static void
5819 print_exited_reason (int exitstatus)
5820 {
5821 struct inferior *inf = current_inferior ();
5822 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5823 struct ui_out *uiout = current_uiout;
5824
5825 annotate_exited (exitstatus);
5826 if (exitstatus)
5827 {
5828 if (ui_out_is_mi_like_p (uiout))
5829 ui_out_field_string (uiout, "reason",
5830 async_reason_lookup (EXEC_ASYNC_EXITED));
5831 ui_out_text (uiout, "[Inferior ");
5832 ui_out_text (uiout, plongest (inf->num));
5833 ui_out_text (uiout, " (");
5834 ui_out_text (uiout, pidstr);
5835 ui_out_text (uiout, ") exited with code ");
5836 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5837 ui_out_text (uiout, "]\n");
5838 }
5839 else
5840 {
5841 if (ui_out_is_mi_like_p (uiout))
5842 ui_out_field_string
5843 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5844 ui_out_text (uiout, "[Inferior ");
5845 ui_out_text (uiout, plongest (inf->num));
5846 ui_out_text (uiout, " (");
5847 ui_out_text (uiout, pidstr);
5848 ui_out_text (uiout, ") exited normally]\n");
5849 }
5850 /* Support the --return-child-result option. */
5851 return_child_result_value = exitstatus;
5852 }
5853
5854 /* Signal received, print why the inferior has stopped. The signal table
5855 tells us to print information about it. */
5856
5857 static void
5858 print_signal_received_reason (enum gdb_signal siggnal)
5859 {
5860 struct ui_out *uiout = current_uiout;
5861
5862 annotate_signal ();
5863
5864 if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5865 {
5866 struct thread_info *t = inferior_thread ();
5867
5868 ui_out_text (uiout, "\n[");
5869 ui_out_field_string (uiout, "thread-name",
5870 target_pid_to_str (t->ptid));
5871 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5872 ui_out_text (uiout, " stopped");
5873 }
5874 else
5875 {
5876 ui_out_text (uiout, "\nProgram received signal ");
5877 annotate_signal_name ();
5878 if (ui_out_is_mi_like_p (uiout))
5879 ui_out_field_string
5880 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5881 ui_out_field_string (uiout, "signal-name",
5882 gdb_signal_to_name (siggnal));
5883 annotate_signal_name_end ();
5884 ui_out_text (uiout, ", ");
5885 annotate_signal_string ();
5886 ui_out_field_string (uiout, "signal-meaning",
5887 gdb_signal_to_string (siggnal));
5888 annotate_signal_string_end ();
5889 }
5890 ui_out_text (uiout, ".\n");
5891 }
5892
5893 /* Reverse execution: target ran out of history info, print why the inferior
5894 has stopped. */
5895
5896 static void
5897 print_no_history_reason (void)
5898 {
5899 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
5900 }
5901
5902 /* Here to return control to GDB when the inferior stops for real.
5903 Print appropriate messages, remove breakpoints, give terminal our modes.
5904
5905 The global STOP_PRINT_FRAME, when nonzero, means print the executing
5906 frame (pc, function, args, file, line number and line text). */
5909
5910 void
5911 normal_stop (void)
5912 {
5913 struct target_waitstatus last;
5914 ptid_t last_ptid;
5915 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5916
5917 get_last_target_status (&last_ptid, &last);
5918
5919 /* If an exception is thrown from this point on, make sure to
5920 propagate GDB's knowledge of the executing state to the
5921 frontend/user running state. A QUIT is an easy exception to see
5922 here, so do this before any filtered output. */
5923 if (!non_stop)
5924 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5925 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5926 && last.kind != TARGET_WAITKIND_EXITED
5927 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5928 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5929
5930 /* In non-stop mode, we don't want GDB to switch threads behind the
5931 user's back, to avoid races where the user is typing a command to
5932 apply to thread x, but GDB switches to thread y before the user
5933 finishes entering the command. */
5934
5935 /* As with the notification of thread events, we want to delay
5936 notifying the user that we've switched thread context until
5937 the inferior actually stops.
5938
5939 There's no point in saying anything if the inferior has exited.
5940 Note that SIGNALLED here means "exited with a signal", not
5941 "received a signal". */
5942 if (!non_stop
5943 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5944 && target_has_execution
5945 && last.kind != TARGET_WAITKIND_SIGNALLED
5946 && last.kind != TARGET_WAITKIND_EXITED
5947 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5948 {
5949 target_terminal_ours_for_output ();
5950 printf_filtered (_("[Switching to %s]\n"),
5951 target_pid_to_str (inferior_ptid));
5952 annotate_thread_changed ();
5953 previous_inferior_ptid = inferior_ptid;
5954 }
5955
5956 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
5957 {
5958 gdb_assert (sync_execution || !target_can_async_p ());
5959
5960 target_terminal_ours_for_output ();
5961 printf_filtered (_("No unwaited-for children left.\n"));
5962 }
5963
5964 if (!breakpoints_always_inserted_mode () && target_has_execution)
5965 {
5966 if (remove_breakpoints ())
5967 {
5968 target_terminal_ours_for_output ();
5969 printf_filtered (_("Cannot remove breakpoints because "
5970 "program is no longer writable.\nFurther "
5971 "execution is probably impossible.\n"));
5972 }
5973 }
5974
5975 /* If an auto-display called a function and that got a signal,
5976 delete that auto-display to avoid an infinite recursion. */
5977
5978 if (stopped_by_random_signal)
5979 disable_current_display ();
5980
5981 /* Don't print a message if in the middle of doing a "step n"
5982 operation for n > 1 */
5983 if (target_has_execution
5984 && last.kind != TARGET_WAITKIND_SIGNALLED
5985 && last.kind != TARGET_WAITKIND_EXITED
5986 && inferior_thread ()->step_multi
5987 && inferior_thread ()->control.stop_step)
5988 goto done;
5989
5990 target_terminal_ours ();
5991 async_enable_stdin ();
5992
5993 /* Set the current source location. This will also happen if we
5994 display the frame below, but the current SAL will be incorrect
5995 during a user hook-stop function. */
5996 if (has_stack_frames () && !stop_stack_dummy)
5997 set_current_sal_from_frame (get_current_frame (), 1);
5998
5999 /* Let the user/frontend see the threads as stopped. */
6000 do_cleanups (old_chain);
6001
6002 /* Look up the hook_stop and run it (CLI internally handles problem
6003 of stop_command's pre-hook not existing). */
6004 if (stop_command)
6005 catch_errors (hook_stop_stub, stop_command,
6006 "Error while running hook_stop:\n", RETURN_MASK_ALL);
6007
6008 if (!has_stack_frames ())
6009 goto done;
6010
6011 if (last.kind == TARGET_WAITKIND_SIGNALLED
6012 || last.kind == TARGET_WAITKIND_EXITED)
6013 goto done;
6014
6015 /* Select innermost stack frame - i.e., current frame is frame 0,
6016 and current location is based on that.
6017 Don't do this on return from a stack dummy routine,
6018 or if the program has exited. */
6019
6020 if (!stop_stack_dummy)
6021 {
6022 select_frame (get_current_frame ());
6023
6024 /* Print current location without a level number, if
6025 we have changed functions or hit a breakpoint.
6026 Print source line if we have one.
6027 bpstat_print() contains the logic deciding in detail
6028 what to print, based on the event(s) that just occurred. */
6029
6030 /* If --batch-silent is enabled then there's no need to print the current
6031 source location, and trying to do so risks an error message about
6032 missing source files. */
6033 if (stop_print_frame && !batch_silent)
6034 {
6035 int bpstat_ret;
6036 int source_flag;
6037 int do_frame_printing = 1;
6038 struct thread_info *tp = inferior_thread ();
6039
6040 bpstat_ret = bpstat_print (tp->control.stop_bpstat, last.kind);
6041 switch (bpstat_ret)
6042 {
6043 case PRINT_UNKNOWN:
6044 /* FIXME: cagney/2002-12-01: Given that a frame ID does
6045 (or should) carry around the function and does (or
6046 should) use that when doing a frame comparison. */
6047 if (tp->control.stop_step
6048 && frame_id_eq (tp->control.step_frame_id,
6049 get_frame_id (get_current_frame ()))
6050 && step_start_function == find_pc_function (stop_pc))
6051 source_flag = SRC_LINE; /* Finished step, just
6052 print source line. */
6053 else
6054 source_flag = SRC_AND_LOC; /* Print location and
6055 source line. */
6056 break;
6057 case PRINT_SRC_AND_LOC:
6058 source_flag = SRC_AND_LOC; /* Print location and
6059 source line. */
6060 break;
6061 case PRINT_SRC_ONLY:
6062 source_flag = SRC_LINE;
6063 break;
6064 case PRINT_NOTHING:
6065 source_flag = SRC_LINE; /* something bogus */
6066 do_frame_printing = 0;
6067 break;
6068 default:
6069 internal_error (__FILE__, __LINE__, _("Unknown value."));
6070 }
6071
6072 /* The behavior of this routine with respect to the source
6073 flag is:
6074 SRC_LINE: Print only source line
6075 LOCATION: Print only location
6076 SRC_AND_LOC: Print location and source line. */
6077 if (do_frame_printing)
6078 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
6079
6080 /* Display the auto-display expressions. */
6081 do_displays ();
6082 }
6083 }
6084
6085 /* Save the function value return registers, if we care.
6086 We might be about to restore their previous contents. */
6087 if (inferior_thread ()->control.proceed_to_finish
6088 && execution_direction != EXEC_REVERSE)
6089 {
6090 /* This should not be necessary. */
6091 if (stop_registers)
6092 regcache_xfree (stop_registers);
6093
6094 /* NB: The copy goes through to the target picking up the value of
6095 all the registers. */
6096 stop_registers = regcache_dup (get_current_regcache ());
6097 }
6098
6099 if (stop_stack_dummy == STOP_STACK_DUMMY)
6100 {
6101 /* Pop the empty frame that contains the stack dummy.
6102 This also restores inferior state prior to the call
6103 (struct infcall_suspend_state). */
6104 struct frame_info *frame = get_current_frame ();
6105
6106 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6107 frame_pop (frame);
6108 /* frame_pop() calls reinit_frame_cache as the last thing it
6109 does which means there's currently no selected frame. We
6110 don't need to re-establish a selected frame if the dummy call
6111 returns normally, that will be done by
6112 restore_infcall_control_state. However, we do have to handle
6113 the case where the dummy call is returning after being
6114 stopped (e.g. the dummy call previously hit a breakpoint).
6115 We can't know which case we have so just always re-establish
6116 a selected frame here. */
6117 select_frame (get_current_frame ());
6118 }
6119
6120 done:
6121 annotate_stopped ();
6122
6123 /* Suppress the stop observer if we're in the middle of:
6124
6125 - a step n (n > 1), as there are still more steps to be done.
6126
6127 - a "finish" command, as the observer will be called in
6128 finish_command_continuation, so it can include the inferior
6129 function's return value.
6130
6131 - calling an inferior function, as we pretend the inferior didn't
6132 run at all. The return value of the call is handled by the
6133 expression evaluator, through call_function_by_hand. */
6134
6135 if (!target_has_execution
6136 || last.kind == TARGET_WAITKIND_SIGNALLED
6137 || last.kind == TARGET_WAITKIND_EXITED
6138 || last.kind == TARGET_WAITKIND_NO_RESUMED
6139 || (!(inferior_thread ()->step_multi
6140 && inferior_thread ()->control.stop_step)
6141 && !(inferior_thread ()->control.stop_bpstat
6142 && inferior_thread ()->control.proceed_to_finish)
6143 && !inferior_thread ()->control.in_infcall))
6144 {
6145 if (!ptid_equal (inferior_ptid, null_ptid))
6146 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6147 stop_print_frame);
6148 else
6149 observer_notify_normal_stop (NULL, stop_print_frame);
6150 }
6151
6152 if (target_has_execution)
6153 {
6154 if (last.kind != TARGET_WAITKIND_SIGNALLED
6155 && last.kind != TARGET_WAITKIND_EXITED)
6156 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6157 Delete any breakpoint that is to be deleted at the next stop. */
6158 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6159 }
6160
6161 /* Try to get rid of automatically added inferiors that are no
6162 longer needed. Keeping those around slows down things linearly.
6163 Note that this never removes the current inferior. */
6164 prune_inferiors ();
6165 }
6166
6167 static int
6168 hook_stop_stub (void *cmd)
6169 {
6170 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6171 return (0);
6172 }
6173 \f
6174 int
6175 signal_stop_state (int signo)
6176 {
6177 return signal_stop[signo];
6178 }
6179
6180 int
6181 signal_print_state (int signo)
6182 {
6183 return signal_print[signo];
6184 }
6185
6186 int
6187 signal_pass_state (int signo)
6188 {
6189 return signal_program[signo];
6190 }
6191
6192 static void
6193 signal_cache_update (int signo)
6194 {
6195 if (signo == -1)
6196 {
6197 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6198 signal_cache_update (signo);
6199
6200 return;
6201 }
6202
6203 signal_pass[signo] = (signal_stop[signo] == 0
6204 && signal_print[signo] == 0
6205 && signal_program[signo] == 1
6206 && signal_catch[signo] == 0);
6207 }
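
/* Worked example (not compiled): the cache above means a signal can
   be passed to the target "silently" only when GDB neither stops on
   it, prints it, catches it, nor withholds it from the program.  For
   instance:

     signal_stop_update (signo, 1);
       -> signal_pass[signo] becomes 0, because a signal that stops
          the inferior is never delivered behind GDB's back;

     signal_stop_update (signo, 0);
     signal_print_update (signo, 0);
       -> signal_pass[signo] becomes 1 again, provided signal_program
          and signal_catch still allow it.  */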
6208
6209 int
6210 signal_stop_update (int signo, int state)
6211 {
6212 int ret = signal_stop[signo];
6213
6214 signal_stop[signo] = state;
6215 signal_cache_update (signo);
6216 return ret;
6217 }
6218
6219 int
6220 signal_print_update (int signo, int state)
6221 {
6222 int ret = signal_print[signo];
6223
6224 signal_print[signo] = state;
6225 signal_cache_update (signo);
6226 return ret;
6227 }
6228
6229 int
6230 signal_pass_update (int signo, int state)
6231 {
6232 int ret = signal_program[signo];
6233
6234 signal_program[signo] = state;
6235 signal_cache_update (signo);
6236 return ret;
6237 }
6238
6239 /* Update the global 'signal_catch' from INFO and notify the
6240 target. */
6241
6242 void
6243 signal_catch_update (const unsigned int *info)
6244 {
6245 int i;
6246
6247 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6248 signal_catch[i] = info[i] > 0;
6249 signal_cache_update (-1);
6250 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6251 }
6252
6253 static void
6254 sig_print_header (void)
6255 {
6256 printf_filtered (_("Signal Stop\tPrint\tPass "
6257 "to program\tDescription\n"));
6258 }
6259
6260 static void
6261 sig_print_info (enum gdb_signal oursig)
6262 {
6263 const char *name = gdb_signal_to_name (oursig);
6264 int name_padding = 13 - strlen (name);
6265
6266 if (name_padding <= 0)
6267 name_padding = 0;
6268
6269 printf_filtered ("%s", name);
6270 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6271 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6272 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6273 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6274 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6275 }
6276
6277 /* Specify how various signals in the inferior should be handled. */
6278
6279 static void
6280 handle_command (char *args, int from_tty)
6281 {
6282 char **argv;
6283 int digits, wordlen;
6284 int sigfirst, signum, siglast;
6285 enum gdb_signal oursig;
6286 int allsigs;
6287 int nsigs;
6288 unsigned char *sigs;
6289 struct cleanup *old_chain;
6290
6291 if (args == NULL)
6292 {
6293 error_no_arg (_("signal to handle"));
6294 }
6295
6296 /* Allocate and zero an array of flags for which signals to handle. */
6297
6298 nsigs = (int) GDB_SIGNAL_LAST;
6299 sigs = (unsigned char *) alloca (nsigs);
6300 memset (sigs, 0, nsigs);
6301
6302 /* Break the command line up into args. */
6303
6304 argv = gdb_buildargv (args);
6305 old_chain = make_cleanup_freeargv (argv);
6306
6307 /* Walk through the args, looking for signal oursigs, signal names, and
6308 actions. Signal numbers and signal names may be interspersed with
6309 actions, with the actions being performed for all signals cumulatively
6310 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
6311
6312 while (*argv != NULL)
6313 {
6314 wordlen = strlen (*argv);
6315 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6316 {;
6317 }
6318 allsigs = 0;
6319 sigfirst = siglast = -1;
6320
6321 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6322 {
6323 /* Apply action to all signals except those used by the
6324 debugger. Silently skip those. */
6325 allsigs = 1;
6326 sigfirst = 0;
6327 siglast = nsigs - 1;
6328 }
6329 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6330 {
6331 SET_SIGS (nsigs, sigs, signal_stop);
6332 SET_SIGS (nsigs, sigs, signal_print);
6333 }
6334 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6335 {
6336 UNSET_SIGS (nsigs, sigs, signal_program);
6337 }
6338 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6339 {
6340 SET_SIGS (nsigs, sigs, signal_print);
6341 }
6342 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6343 {
6344 SET_SIGS (nsigs, sigs, signal_program);
6345 }
6346 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6347 {
6348 UNSET_SIGS (nsigs, sigs, signal_stop);
6349 }
6350 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6351 {
6352 SET_SIGS (nsigs, sigs, signal_program);
6353 }
6354 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6355 {
6356 UNSET_SIGS (nsigs, sigs, signal_print);
6357 UNSET_SIGS (nsigs, sigs, signal_stop);
6358 }
6359 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6360 {
6361 UNSET_SIGS (nsigs, sigs, signal_program);
6362 }
6363 else if (digits > 0)
6364 {
6365 /* It is numeric. The numeric signal refers to our own
6366 internal signal numbering from target.h, not to host/target
6367 signal number. This is a feature; users really should be
6368 using symbolic names anyway, and the common ones like
6369 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6370
6371 sigfirst = siglast = (int)
6372 gdb_signal_from_command (atoi (*argv));
6373 if ((*argv)[digits] == '-')
6374 {
6375 siglast = (int)
6376 gdb_signal_from_command (atoi ((*argv) + digits + 1));
6377 }
6378 if (sigfirst > siglast)
6379 {
6380 /* Bet he didn't figure we'd think of this case... */
6381 signum = sigfirst;
6382 sigfirst = siglast;
6383 siglast = signum;
6384 }
6385 }
6386 else
6387 {
6388 oursig = gdb_signal_from_name (*argv);
6389 if (oursig != GDB_SIGNAL_UNKNOWN)
6390 {
6391 sigfirst = siglast = (int) oursig;
6392 }
6393 else
6394 {
6395 /* Not a number and not a recognized flag word => complain. */
6396 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6397 }
6398 }
6399
6400 /* If any signal numbers or symbol names were found, set flags for
6401 which signals to apply actions to. */
6402
6403 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6404 {
6405 switch ((enum gdb_signal) signum)
6406 {
6407 case GDB_SIGNAL_TRAP:
6408 case GDB_SIGNAL_INT:
6409 if (!allsigs && !sigs[signum])
6410 {
6411 if (query (_("%s is used by the debugger.\n\
6412 Are you sure you want to change it? "),
6413 gdb_signal_to_name ((enum gdb_signal) signum)))
6414 {
6415 sigs[signum] = 1;
6416 }
6417 else
6418 {
6419 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6420 gdb_flush (gdb_stdout);
6421 }
6422 }
6423 break;
6424 case GDB_SIGNAL_0:
6425 case GDB_SIGNAL_DEFAULT:
6426 case GDB_SIGNAL_UNKNOWN:
6427 /* Make sure that "all" doesn't print these. */
6428 break;
6429 default:
6430 sigs[signum] = 1;
6431 break;
6432 }
6433 }
6434
6435 argv++;
6436 }
6437
6438 for (signum = 0; signum < nsigs; signum++)
6439 if (sigs[signum])
6440 {
6441 signal_cache_update (-1);
6442 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6443 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
6444
6445 if (from_tty)
6446 {
6447 /* Show the results. */
6448 sig_print_header ();
6449 for (; signum < nsigs; signum++)
6450 if (sigs[signum])
6451 sig_print_info (signum);
6452 }
6453
6454 break;
6455 }
6456
6457 do_cleanups (old_chain);
6458 }
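
/* Usage examples for the command implemented above (GDB command input,
   not C).  Keywords, signal names and numeric ranges (LOW-HIGH) may be
   mixed, with the accumulated actions applied to every signal named:

     (gdb) handle SIGUSR1 nostop noprint pass
     (gdb) handle SIGINT stop print
     (gdb) handle 5-10 noprint
     (gdb) handle all pass

   Numeric arguments use GDB's own signal numbering and only 1-15 are
   accepted; "all" skips the signals used by the debugger itself.  */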
6459
6460 /* Complete the "handle" command. */
6461
6462 static VEC (char_ptr) *
6463 handle_completer (struct cmd_list_element *ignore,
6464 const char *text, const char *word)
6465 {
6466 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
6467 static const char * const keywords[] =
6468 {
6469 "all",
6470 "stop",
6471 "ignore",
6472 "print",
6473 "pass",
6474 "nostop",
6475 "noignore",
6476 "noprint",
6477 "nopass",
6478 NULL,
6479 };
6480
6481 vec_signals = signal_completer (ignore, text, word);
6482 vec_keywords = complete_on_enum (keywords, word, word);
6483
6484 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
6485 VEC_free (char_ptr, vec_signals);
6486 VEC_free (char_ptr, vec_keywords);
6487 return return_val;
6488 }
6489
6490 static void
6491 xdb_handle_command (char *args, int from_tty)
6492 {
6493 char **argv;
6494 struct cleanup *old_chain;
6495
6496 if (args == NULL)
6497 error_no_arg (_("xdb command"));
6498
6499 /* Break the command line up into args. */
6500
6501 argv = gdb_buildargv (args);
6502 old_chain = make_cleanup_freeargv (argv);
6503 if (argv[1] != (char *) NULL)
6504 {
6505 char *argBuf;
6506 int bufLen;
6507
6508 bufLen = strlen (argv[0]) + 20;
6509 argBuf = (char *) xmalloc (bufLen);
6510 if (argBuf)
6511 {
6512 int validFlag = 1;
6513 enum gdb_signal oursig;
6514
6515 oursig = gdb_signal_from_name (argv[0]);
6516 memset (argBuf, 0, bufLen);
6517 if (strcmp (argv[1], "Q") == 0)
6518 sprintf (argBuf, "%s %s", argv[0], "noprint");
6519 else
6520 {
6521 if (strcmp (argv[1], "s") == 0)
6522 {
6523 if (!signal_stop[oursig])
6524 sprintf (argBuf, "%s %s", argv[0], "stop");
6525 else
6526 sprintf (argBuf, "%s %s", argv[0], "nostop");
6527 }
6528 else if (strcmp (argv[1], "i") == 0)
6529 {
6530 if (!signal_program[oursig])
6531 sprintf (argBuf, "%s %s", argv[0], "pass");
6532 else
6533 sprintf (argBuf, "%s %s", argv[0], "nopass");
6534 }
6535 else if (strcmp (argv[1], "r") == 0)
6536 {
6537 if (!signal_print[oursig])
6538 sprintf (argBuf, "%s %s", argv[0], "print");
6539 else
6540 sprintf (argBuf, "%s %s", argv[0], "noprint");
6541 }
6542 else
6543 validFlag = 0;
6544 }
6545 if (validFlag)
6546 handle_command (argBuf, from_tty);
6547 else
6548 printf_filtered (_("Invalid signal handling flag.\n"));
6549 if (argBuf)
6550 xfree (argBuf);
6551 }
6552 }
6553 do_cleanups (old_chain);
6554 }
6555
6556 enum gdb_signal
6557 gdb_signal_from_command (int num)
6558 {
6559 if (num >= 1 && num <= 15)
6560 return (enum gdb_signal) num;
6561 error (_("Only signals 1-15 are valid as numeric signals.\n\
6562 Use \"info signals\" for a list of symbolic signals."));
6563 }
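
/* Examples (not compiled): since GDB's internal numbering mirrors the
   traditional Unix numbers for signals 1-15, the cast above gives,
   for instance:

     gdb_signal_from_command (2)    => GDB_SIGNAL_INT
     gdb_signal_from_command (11)   => GDB_SIGNAL_SEGV
     gdb_signal_from_command (42)   => error, outside 1-15

   Anything outside 1-15 must be named symbolically; "info signals"
   lists the valid names.  */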
6564
6565 /* Print current contents of the tables set by the handle command.
6566 It is possible we should just be printing signals actually used
6567 by the current target (but for things to work right when switching
6568 targets, all signals should be in the signal tables). */
6569
6570 static void
6571 signals_info (char *signum_exp, int from_tty)
6572 {
6573 enum gdb_signal oursig;
6574
6575 sig_print_header ();
6576
6577 if (signum_exp)
6578 {
6579 /* First see if this is a symbol name. */
6580 oursig = gdb_signal_from_name (signum_exp);
6581 if (oursig == GDB_SIGNAL_UNKNOWN)
6582 {
6583 /* No, try numeric. */
6584 oursig =
6585 gdb_signal_from_command (parse_and_eval_long (signum_exp));
6586 }
6587 sig_print_info (oursig);
6588 return;
6589 }
6590
6591 printf_filtered ("\n");
6592 /* These ugly casts brought to you by the native VAX compiler. */
6593 for (oursig = GDB_SIGNAL_FIRST;
6594 (int) oursig < (int) GDB_SIGNAL_LAST;
6595 oursig = (enum gdb_signal) ((int) oursig + 1))
6596 {
6597 QUIT;
6598
6599 if (oursig != GDB_SIGNAL_UNKNOWN
6600 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
6601 sig_print_info (oursig);
6602 }
6603
6604 printf_filtered (_("\nUse the \"handle\" command "
6605 "to change these tables.\n"));
6606 }
6607
6608 /* Check if it makes sense to read $_siginfo from the current thread
6609 at this point. If not, throw an error. */
6610
6611 static void
6612 validate_siginfo_access (void)
6613 {
6614 /* No current inferior, no siginfo. */
6615 if (ptid_equal (inferior_ptid, null_ptid))
6616 error (_("No thread selected."));
6617
6618 /* Don't try to read from a dead thread. */
6619 if (is_exited (inferior_ptid))
6620 error (_("The current thread has terminated"));
6621
6622 /* ... or from a spinning thread. */
6623 if (is_running (inferior_ptid))
6624 error (_("Selected thread is running."));
6625 }
6626
6627 /* The $_siginfo convenience variable is a bit special. We don't know
6628 for sure the type of the value until we actually have a chance to
6629 fetch the data. The type can change depending on gdbarch, so it is
6630 also dependent on which thread you have selected.  This is handled by:
6631
6632 1. making $_siginfo be an internalvar that creates a new value on
6633 access.
6634
6635 2. making the value of $_siginfo be an lval_computed value. */
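
/* As a usage sketch (assuming a Linux target whose gdbarch supplies a
   siginfo type), a user could examine the last signal's data with
   commands such as:

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr

   Each such access is served by the lval_computed read/write functions
   below.  */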
6636
6637 /* This function implements the lval_computed support for reading a
6638 $_siginfo value. */
6639
6640 static void
6641 siginfo_value_read (struct value *v)
6642 {
6643 LONGEST transferred;
6644
6645 validate_siginfo_access ();
6646
6647 transferred =
6648 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6649 NULL,
6650 value_contents_all_raw (v),
6651 value_offset (v),
6652 TYPE_LENGTH (value_type (v)));
6653
6654 if (transferred != TYPE_LENGTH (value_type (v)))
6655 error (_("Unable to read siginfo"));
6656 }
6657
6658 /* This function implements the lval_computed support for writing a
6659 $_siginfo value. */
6660
6661 static void
6662 siginfo_value_write (struct value *v, struct value *fromval)
6663 {
6664 LONGEST transferred;
6665
6666 validate_siginfo_access ();
6667
6668 transferred = target_write (&current_target,
6669 TARGET_OBJECT_SIGNAL_INFO,
6670 NULL,
6671 value_contents_all_raw (fromval),
6672 value_offset (v),
6673 TYPE_LENGTH (value_type (fromval)));
6674
6675 if (transferred != TYPE_LENGTH (value_type (fromval)))
6676 error (_("Unable to write siginfo"));
6677 }
6678
6679 static const struct lval_funcs siginfo_value_funcs =
6680 {
6681 siginfo_value_read,
6682 siginfo_value_write
6683 };
6684
6685 /* Return a new value with the correct type for the siginfo object of
6686 the current thread using architecture GDBARCH. Return a void value
6687 if there's no object available. */
6688
6689 static struct value *
6690 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
6691 void *ignore)
6692 {
6693 if (target_has_stack
6694 && !ptid_equal (inferior_ptid, null_ptid)
6695 && gdbarch_get_siginfo_type_p (gdbarch))
6696 {
6697 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6698
6699 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6700 }
6701
6702 return allocate_value (builtin_type (gdbarch)->builtin_void);
6703 }
6704
6705 \f
6706 /* infcall_suspend_state contains state about the program itself like its
6707 registers and any signal it received when it last stopped.
6708 This state must be restored regardless of how the inferior function call
6709 ends (either successfully, or after it hits a breakpoint or signal)
6710 if the program is to properly continue where it left off. */
6711
6712 struct infcall_suspend_state
6713 {
6714 struct thread_suspend_state thread_suspend;
6715 #if 0 /* Currently unused and empty structures are not valid C. */
6716 struct inferior_suspend_state inferior_suspend;
6717 #endif
6718
6719 /* Other fields: */
6720 CORE_ADDR stop_pc;
6721 struct regcache *registers;
6722
6723 /* Architecture describing the format of SIGINFO_DATA, or NULL if no such data is present.  */
6724 struct gdbarch *siginfo_gdbarch;
6725
6726 /* The raw siginfo data, laid out according to SIGINFO_GDBARCH, with a
6727 length of TYPE_LENGTH (gdbarch_get_siginfo_type ()).  The contents are
6728 only meaningful for that gdbarch.  */
6729 gdb_byte *siginfo_data;
6730 };
6731
6732 struct infcall_suspend_state *
6733 save_infcall_suspend_state (void)
6734 {
6735 struct infcall_suspend_state *inf_state;
6736 struct thread_info *tp = inferior_thread ();
6737 #if 0
6738 struct inferior *inf = current_inferior ();
6739 #endif
6740 struct regcache *regcache = get_current_regcache ();
6741 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6742 gdb_byte *siginfo_data = NULL;
6743
6744 if (gdbarch_get_siginfo_type_p (gdbarch))
6745 {
6746 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6747 size_t len = TYPE_LENGTH (type);
6748 struct cleanup *back_to;
6749
6750 siginfo_data = xmalloc (len);
6751 back_to = make_cleanup (xfree, siginfo_data);
6752
6753 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6754 siginfo_data, 0, len) == len)
6755 discard_cleanups (back_to);
6756 else
6757 {
6758 /* Errors ignored. */
6759 do_cleanups (back_to);
6760 siginfo_data = NULL;
6761 }
6762 }
6763
6764 inf_state = XZALLOC (struct infcall_suspend_state);
6765
6766 if (siginfo_data)
6767 {
6768 inf_state->siginfo_gdbarch = gdbarch;
6769 inf_state->siginfo_data = siginfo_data;
6770 }
6771
6772 inf_state->thread_suspend = tp->suspend;
6773 #if 0 /* Currently unused and empty structures are not valid C. */
6774 inf_state->inferior_suspend = inf->suspend;
6775 #endif
6776
6777 /* run_inferior_call will not deliver this signal anyway, since it calls
6778 `proceed' with GDB_SIGNAL_0.  */
6779 tp->suspend.stop_signal = GDB_SIGNAL_0;
6780
6781 inf_state->stop_pc = stop_pc;
6782
6783 inf_state->registers = regcache_dup (regcache);
6784
6785 return inf_state;
6786 }
6787
6788 /* Restore inferior session state to INF_STATE. */
6789
6790 void
6791 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6792 {
6793 struct thread_info *tp = inferior_thread ();
6794 #if 0
6795 struct inferior *inf = current_inferior ();
6796 #endif
6797 struct regcache *regcache = get_current_regcache ();
6798 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6799
6800 tp->suspend = inf_state->thread_suspend;
6801 #if 0 /* Currently unused and empty structures are not valid C. */
6802 inf->suspend = inf_state->inferior_suspend;
6803 #endif
6804
6805 stop_pc = inf_state->stop_pc;
6806
6807 if (inf_state->siginfo_gdbarch == gdbarch)
6808 {
6809 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6810
6811 /* Errors ignored. */
6812 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6813 inf_state->siginfo_data, 0, TYPE_LENGTH (type));
6814 }
6815
6816 /* The inferior can be gone if the user types "print exit(0)"
6817 (and perhaps other times). */
6818 if (target_has_execution)
6819 /* NB: The register write goes through to the target. */
6820 regcache_cpy (regcache, inf_state->registers);
6821
6822 discard_infcall_suspend_state (inf_state);
6823 }
6824
6825 static void
6826 do_restore_infcall_suspend_state_cleanup (void *state)
6827 {
6828 restore_infcall_suspend_state (state);
6829 }
6830
6831 struct cleanup *
6832 make_cleanup_restore_infcall_suspend_state
6833 (struct infcall_suspend_state *inf_state)
6834 {
6835 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6836 }
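
/* A minimal sketch of how callers (for instance the inferior-call code
   in infcall.c) are expected to use this pair; the snippet below is an
   illustration, not a copy of that code:

     struct infcall_suspend_state *state = save_infcall_suspend_state ();
     struct cleanup *chain
       = make_cleanup_restore_infcall_suspend_state (state);

     ... set up registers and run the dummy call ...

     do_cleanups (chain);   <- restores registers, siginfo and the
                               saved stop state  */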
6837
6838 void
6839 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6840 {
6841 regcache_xfree (inf_state->registers);
6842 xfree (inf_state->siginfo_data);
6843 xfree (inf_state);
6844 }
6845
6846 struct regcache *
6847 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6848 {
6849 return inf_state->registers;
6850 }
6851
6852 /* infcall_control_state contains state regarding gdb's control of the
6853 inferior itself like stepping control. It also contains session state like
6854 the user's currently selected frame. */
6855
6856 struct infcall_control_state
6857 {
6858 struct thread_control_state thread_control;
6859 struct inferior_control_state inferior_control;
6860
6861 /* Other fields: */
6862 enum stop_stack_kind stop_stack_dummy;
6863 int stopped_by_random_signal;
6864 int stop_after_trap;
6865
6866 /* ID of the selected frame when the inferior function call was made.  */
6867 struct frame_id selected_frame_id;
6868 };
6869
6870 /* Save all of the information associated with the inferior<==>gdb
6871 connection. */
6872
6873 struct infcall_control_state *
6874 save_infcall_control_state (void)
6875 {
6876 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6877 struct thread_info *tp = inferior_thread ();
6878 struct inferior *inf = current_inferior ();
6879
6880 inf_status->thread_control = tp->control;
6881 inf_status->inferior_control = inf->control;
6882
6883 tp->control.step_resume_breakpoint = NULL;
6884 tp->control.exception_resume_breakpoint = NULL;
6885
6886 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6887 chain. If caller's caller is walking the chain, they'll be happier if we
6888 hand them back the original chain when restore_infcall_control_state is
6889 called. */
6890 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6891
6892 /* Other fields: */
6893 inf_status->stop_stack_dummy = stop_stack_dummy;
6894 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6895 inf_status->stop_after_trap = stop_after_trap;
6896
6897 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6898
6899 return inf_status;
6900 }
6901
6902 static int
6903 restore_selected_frame (void *args)
6904 {
6905 struct frame_id *fid = (struct frame_id *) args;
6906 struct frame_info *frame;
6907
6908 frame = frame_find_by_id (*fid);
6909
6910 /* If frame_find_by_id fails, the frame that was selected when the state
6911 was saved no longer exists.  */
6912 if (frame == NULL)
6913 {
6914 warning (_("Unable to restore previously selected frame."));
6915 return 0;
6916 }
6917
6918 select_frame (frame);
6919
6920 return (1);
6921 }
6922
6923 /* Restore inferior session state to INF_STATUS. */
6924
6925 void
6926 restore_infcall_control_state (struct infcall_control_state *inf_status)
6927 {
6928 struct thread_info *tp = inferior_thread ();
6929 struct inferior *inf = current_inferior ();
6930
6931 if (tp->control.step_resume_breakpoint)
6932 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6933
6934 if (tp->control.exception_resume_breakpoint)
6935 tp->control.exception_resume_breakpoint->disposition
6936 = disp_del_at_next_stop;
6937
6938 /* Handle the bpstat_copy of the chain. */
6939 bpstat_clear (&tp->control.stop_bpstat);
6940
6941 tp->control = inf_status->thread_control;
6942 inf->control = inf_status->inferior_control;
6943
6944 /* Other fields: */
6945 stop_stack_dummy = inf_status->stop_stack_dummy;
6946 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6947 stop_after_trap = inf_status->stop_after_trap;
6948
6949 if (target_has_stack)
6950 {
6951 /* The point of catch_errors is that if the stack is clobbered,
6952 walking the stack might encounter a garbage pointer and call
6953 error () while trying to dereference it.  */
6954 if (catch_errors
6955 (restore_selected_frame, &inf_status->selected_frame_id,
6956 "Unable to restore previously selected frame:\n",
6957 RETURN_MASK_ERROR) == 0)
6958 /* Error in restoring the selected frame. Select the innermost
6959 frame. */
6960 select_frame (get_current_frame ());
6961 }
6962
6963 xfree (inf_status);
6964 }
6965
6966 static void
6967 do_restore_infcall_control_state_cleanup (void *sts)
6968 {
6969 restore_infcall_control_state (sts);
6970 }
6971
6972 struct cleanup *
6973 make_cleanup_restore_infcall_control_state
6974 (struct infcall_control_state *inf_status)
6975 {
6976 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
6977 }
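
/* A hedged sketch of the intended use (illustrative only): save the
   control state, register the restore as a cleanup, and then either let
   do_cleanups put everything back or explicitly keep the state the call
   left behind:

     struct infcall_control_state *status = save_infcall_control_state ();
     struct cleanup *chain
       = make_cleanup_restore_infcall_control_state (status);

     ... perform the inferior function call ...

     do_cleanups (chain);                      <- restore everything, or:
     discard_cleanups (chain);
     discard_infcall_control_state (status);   <- keep the new state  */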
6978
6979 void
6980 discard_infcall_control_state (struct infcall_control_state *inf_status)
6981 {
6982 if (inf_status->thread_control.step_resume_breakpoint)
6983 inf_status->thread_control.step_resume_breakpoint->disposition
6984 = disp_del_at_next_stop;
6985
6986 if (inf_status->thread_control.exception_resume_breakpoint)
6987 inf_status->thread_control.exception_resume_breakpoint->disposition
6988 = disp_del_at_next_stop;
6989
6990 /* See save_infcall_control_state for info on stop_bpstat. */
6991 bpstat_clear (&inf_status->thread_control.stop_bpstat);
6992
6993 xfree (inf_status);
6994 }
6995 \f
6996 int
6997 ptid_match (ptid_t ptid, ptid_t filter)
6998 {
6999 if (ptid_equal (filter, minus_one_ptid))
7000 return 1;
7001 if (ptid_is_pid (filter)
7002 && ptid_get_pid (ptid) == ptid_get_pid (filter))
7003 return 1;
7004 else if (ptid_equal (ptid, filter))
7005 return 1;
7006
7007 return 0;
7008 }
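
/* For example (illustrative, using the standard ptid constructors):

     ptid_match (ptid_build (1234, 42, 0), pid_to_ptid (1234))  => 1
     ptid_match (ptid_build (9999, 42, 0), pid_to_ptid (1234))  => 0
     ptid_match (ptid_build (9999, 42, 0), minus_one_ptid)      => 1  */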
7009
7010 /* restore_inferior_ptid() will be used by the cleanup machinery
7011 to restore the inferior_ptid value saved in a call to
7012 save_inferior_ptid(). */
7013
7014 static void
7015 restore_inferior_ptid (void *arg)
7016 {
7017 ptid_t *saved_ptid_ptr = arg;
7018
7019 inferior_ptid = *saved_ptid_ptr;
7020 xfree (arg);
7021 }
7022
7023 /* Save the value of inferior_ptid so that it may be restored by a
7024 later call to do_cleanups(). Returns the struct cleanup pointer
7025 needed for later doing the cleanup. */
7026
7027 struct cleanup *
7028 save_inferior_ptid (void)
7029 {
7030 ptid_t *saved_ptid_ptr;
7031
7032 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7033 *saved_ptid_ptr = inferior_ptid;
7034 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7035 }
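
/* Typical usage pattern (a sketch; any code that temporarily switches
   to another thread follows this shape):

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = some_other_ptid;
     ... work on the temporarily selected thread ...

     do_cleanups (old_chain);   <- inferior_ptid is restored here  */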
7036 \f
7037
7038 /* User interface for reverse debugging:
7039 Set exec-direction / show exec-direction commands
7040 (setting the direction errors out unless the target can execute in reverse).  */
7041
7042 int execution_direction = EXEC_FORWARD;
7043 static const char exec_forward[] = "forward";
7044 static const char exec_reverse[] = "reverse";
7045 static const char *exec_direction = exec_forward;
7046 static const char *const exec_direction_names[] = {
7047 exec_forward,
7048 exec_reverse,
7049 NULL
7050 };
7051
7052 static void
7053 set_exec_direction_func (char *args, int from_tty,
7054 struct cmd_list_element *cmd)
7055 {
7056 if (target_can_execute_reverse)
7057 {
7058 if (!strcmp (exec_direction, exec_forward))
7059 execution_direction = EXEC_FORWARD;
7060 else if (!strcmp (exec_direction, exec_reverse))
7061 execution_direction = EXEC_REVERSE;
7062 }
7063 else
7064 {
7065 exec_direction = exec_forward;
7066 error (_("Target does not support this operation."));
7067 }
7068 }
7069
7070 static void
7071 show_exec_direction_func (struct ui_file *out, int from_tty,
7072 struct cmd_list_element *cmd, const char *value)
7073 {
7074 switch (execution_direction) {
7075 case EXEC_FORWARD:
7076 fprintf_filtered (out, _("Forward.\n"));
7077 break;
7078 case EXEC_REVERSE:
7079 fprintf_filtered (out, _("Reverse.\n"));
7080 break;
7081 default:
7082 internal_error (__FILE__, __LINE__,
7083 _("bogus execution_direction value: %d"),
7084 (int) execution_direction);
7085 }
7086 }
7087
7088 /* User interface for non-stop mode. */
7089
7090 int non_stop = 0;
7091
7092 static void
7093 set_non_stop (char *args, int from_tty,
7094 struct cmd_list_element *c)
7095 {
7096 if (target_has_execution)
7097 {
7098 non_stop_1 = non_stop;
7099 error (_("Cannot change this setting while the inferior is running."));
7100 }
7101
7102 non_stop = non_stop_1;
7103 }
7104
7105 static void
7106 show_non_stop (struct ui_file *file, int from_tty,
7107 struct cmd_list_element *c, const char *value)
7108 {
7109 fprintf_filtered (file,
7110 _("Controlling the inferior in non-stop mode is %s.\n"),
7111 value);
7112 }
7113
7114 static void
7115 show_schedule_multiple (struct ui_file *file, int from_tty,
7116 struct cmd_list_element *c, const char *value)
7117 {
7118 fprintf_filtered (file, _("Resuming the execution of threads "
7119 "of all processes is %s.\n"), value);
7120 }
7121
7122 /* Implementation of `siginfo' variable. */
7123
7124 static const struct internalvar_funcs siginfo_funcs =
7125 {
7126 siginfo_make_value,
7127 NULL,
7128 NULL
7129 };
7130
7131 void
7132 _initialize_infrun (void)
7133 {
7134 int i;
7135 int numsigs;
7136 struct cmd_list_element *c;
7137
7138 add_info ("signals", signals_info, _("\
7139 What debugger does when program gets various signals.\n\
7140 Specify a signal as argument to print info on that signal only."));
7141 add_info_alias ("handle", "signals", 0);
7142
7143 c = add_com ("handle", class_run, handle_command, _("\
7144 Specify how to handle signals.\n\
7145 Usage: handle SIGNAL [ACTIONS]\n\
7146 Args are signals and actions to apply to those signals.\n\
7147 If no actions are specified, the current settings for the specified signals\n\
7148 will be displayed instead.\n\
7149 \n\
7150 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7151 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7152 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7153 The special arg \"all\" is recognized to mean all signals except those\n\
7154 used by the debugger, typically SIGTRAP and SIGINT.\n\
7155 \n\
7156 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7157 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7158 Stop means reenter debugger if this signal happens (implies print).\n\
7159 Print means print a message if this signal happens.\n\
7160 Pass means let program see this signal; otherwise program doesn't know.\n\
7161 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7162 Pass and Stop may be combined.\n\
7163 \n\
7164 Multiple signals may be specified. Signal numbers and signal names\n\
7165 may be interspersed with actions, with the actions being performed for\n\
7166 all signals cumulatively specified."));
7167 set_cmd_completer (c, handle_completer);
7168
7169 if (xdb_commands)
7170 {
7171 add_com ("lz", class_info, signals_info, _("\
7172 What debugger does when program gets various signals.\n\
7173 Specify a signal as argument to print info on that signal only."));
7174 add_com ("z", class_run, xdb_handle_command, _("\
7175 Specify how to handle a signal.\n\
7176 Args are signals and actions to apply to those signals.\n\
7177 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7178 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7179 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7180 The special arg \"all\" is recognized to mean all signals except those\n\
7181 used by the debugger, typically SIGTRAP and SIGINT.\n\
7182 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7183 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7184 nopass), and \"Q\" (noprint).\n\
7185 Stop means reenter debugger if this signal happens (implies print).\n\
7186 Print means print a message if this signal happens.\n\
7187 Pass means let program see this signal; otherwise program doesn't know.\n\
7188 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7189 Pass and Stop may be combined."));
7190 }
7191
7192 if (!dbx_commands)
7193 stop_command = add_cmd ("stop", class_obscure,
7194 not_just_help_class_command, _("\
7195 There is no `stop' command, but you can set a hook on `stop'.\n\
7196 This allows you to set a list of commands to be run each time execution\n\
7197 of the program stops."), &cmdlist);
7198
7199 add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7200 Set inferior debugging."), _("\
7201 Show inferior debugging."), _("\
7202 When non-zero, inferior specific debugging is enabled."),
7203 NULL,
7204 show_debug_infrun,
7205 &setdebuglist, &showdebuglist);
7206
7207 add_setshow_boolean_cmd ("displaced", class_maintenance,
7208 &debug_displaced, _("\
7209 Set displaced stepping debugging."), _("\
7210 Show displaced stepping debugging."), _("\
7211 When non-zero, displaced stepping specific debugging is enabled."),
7212 NULL,
7213 show_debug_displaced,
7214 &setdebuglist, &showdebuglist);
7215
7216 add_setshow_boolean_cmd ("non-stop", no_class,
7217 &non_stop_1, _("\
7218 Set whether gdb controls the inferior in non-stop mode."), _("\
7219 Show whether gdb controls the inferior in non-stop mode."), _("\
7220 When debugging a multi-threaded program and this setting is\n\
7221 off (the default, also called all-stop mode), when one thread stops\n\
7222 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7223 all other threads in the program while you interact with the thread of\n\
7224 interest. When you continue or step a thread, you can allow the other\n\
7225 threads to run, or have them remain stopped, but while you inspect any\n\
7226 thread's state, all threads stop.\n\
7227 \n\
7228 In non-stop mode, when one thread stops, other threads can continue\n\
7229 to run freely. You'll be able to step each thread independently,\n\
7230 leave it stopped or free to run as needed."),
7231 set_non_stop,
7232 show_non_stop,
7233 &setlist,
7234 &showlist);
7235
7236 numsigs = (int) GDB_SIGNAL_LAST;
7237 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7238 signal_print = (unsigned char *)
7239 xmalloc (sizeof (signal_print[0]) * numsigs);
7240 signal_program = (unsigned char *)
7241 xmalloc (sizeof (signal_program[0]) * numsigs);
7242 signal_catch = (unsigned char *)
7243 xmalloc (sizeof (signal_catch[0]) * numsigs);
7244 signal_pass = (unsigned char *)
7245 xmalloc (sizeof (signal_program[0]) * numsigs);
7246 for (i = 0; i < numsigs; i++)
7247 {
7248 signal_stop[i] = 1;
7249 signal_print[i] = 1;
7250 signal_program[i] = 1;
7251 signal_catch[i] = 0;
7252 }
7253
7254 /* Signals caused by debugger's own actions
7255 should not be given to the program afterwards. */
7256 signal_program[GDB_SIGNAL_TRAP] = 0;
7257 signal_program[GDB_SIGNAL_INT] = 0;
7258
7259 /* Signals that are not errors should not normally enter the debugger. */
7260 signal_stop[GDB_SIGNAL_ALRM] = 0;
7261 signal_print[GDB_SIGNAL_ALRM] = 0;
7262 signal_stop[GDB_SIGNAL_VTALRM] = 0;
7263 signal_print[GDB_SIGNAL_VTALRM] = 0;
7264 signal_stop[GDB_SIGNAL_PROF] = 0;
7265 signal_print[GDB_SIGNAL_PROF] = 0;
7266 signal_stop[GDB_SIGNAL_CHLD] = 0;
7267 signal_print[GDB_SIGNAL_CHLD] = 0;
7268 signal_stop[GDB_SIGNAL_IO] = 0;
7269 signal_print[GDB_SIGNAL_IO] = 0;
7270 signal_stop[GDB_SIGNAL_POLL] = 0;
7271 signal_print[GDB_SIGNAL_POLL] = 0;
7272 signal_stop[GDB_SIGNAL_URG] = 0;
7273 signal_print[GDB_SIGNAL_URG] = 0;
7274 signal_stop[GDB_SIGNAL_WINCH] = 0;
7275 signal_print[GDB_SIGNAL_WINCH] = 0;
7276 signal_stop[GDB_SIGNAL_PRIO] = 0;
7277 signal_print[GDB_SIGNAL_PRIO] = 0;
7278
7279 /* These signals are used internally by user-level thread
7280 implementations. (See signal(5) on Solaris.) Like the above
7281 signals, a healthy program receives and handles them as part of
7282 its normal operation. */
7283 signal_stop[GDB_SIGNAL_LWP] = 0;
7284 signal_print[GDB_SIGNAL_LWP] = 0;
7285 signal_stop[GDB_SIGNAL_WAITING] = 0;
7286 signal_print[GDB_SIGNAL_WAITING] = 0;
7287 signal_stop[GDB_SIGNAL_CANCEL] = 0;
7288 signal_print[GDB_SIGNAL_CANCEL] = 0;
7289
7290 /* Update cached state. */
7291 signal_cache_update (-1);
7292
7293 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7294 &stop_on_solib_events, _("\
7295 Set stopping for shared library events."), _("\
7296 Show stopping for shared library events."), _("\
7297 If nonzero, gdb will give control to the user when the dynamic linker\n\
7298 notifies gdb of shared library events. The most common event of interest\n\
7299 to the user would be loading/unloading of a new library."),
7300 set_stop_on_solib_events,
7301 show_stop_on_solib_events,
7302 &setlist, &showlist);
7303
7304 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7305 follow_fork_mode_kind_names,
7306 &follow_fork_mode_string, _("\
7307 Set debugger response to a program call of fork or vfork."), _("\
7308 Show debugger response to a program call of fork or vfork."), _("\
7309 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7310 parent - the original process is debugged after a fork\n\
7311 child - the new process is debugged after a fork\n\
7312 The unfollowed process will continue to run.\n\
7313 By default, the debugger will follow the parent process."),
7314 NULL,
7315 show_follow_fork_mode_string,
7316 &setlist, &showlist);
7317
7318 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7319 follow_exec_mode_names,
7320 &follow_exec_mode_string, _("\
7321 Set debugger response to a program call of exec."), _("\
7322 Show debugger response to a program call of exec."), _("\
7323 An exec call replaces the program image of a process.\n\
7324 \n\
7325 follow-exec-mode can be:\n\
7326 \n\
7327 new - the debugger creates a new inferior and rebinds the process\n\
7328 to this new inferior. The program the process was running before\n\
7329 the exec call can be restarted afterwards by restarting the original\n\
7330 inferior.\n\
7331 \n\
7332 same - the debugger keeps the process bound to the same inferior.\n\
7333 The new executable image replaces the previous executable loaded in\n\
7334 the inferior. Restarting the inferior after the exec call restarts\n\
7335 the executable the process was running after the exec call.\n\
7336 \n\
7337 By default, the debugger will use the same inferior."),
7338 NULL,
7339 show_follow_exec_mode_string,
7340 &setlist, &showlist);
7341
7342 add_setshow_enum_cmd ("scheduler-locking", class_run,
7343 scheduler_enums, &scheduler_mode, _("\
7344 Set mode for locking scheduler during execution."), _("\
7345 Show mode for locking scheduler during execution."), _("\
7346 off == no locking (threads may preempt at any time)\n\
7347 on == full locking (no thread except the current thread may run)\n\
7348 step == scheduler locked during every single-step operation.\n\
7349 In this mode, no other thread may run during a step command.\n\
7350 Other threads may run while stepping over a function call ('next')."),
7351 set_schedlock_func, /* traps on target vector */
7352 show_scheduler_mode,
7353 &setlist, &showlist);
7354
7355 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7356 Set mode for resuming threads of all processes."), _("\
7357 Show mode for resuming threads of all processes."), _("\
7358 When on, execution commands (such as 'continue' or 'next') resume all\n\
7359 threads of all processes. When off (which is the default), execution\n\
7360 commands only resume the threads of the current process. The set of\n\
7361 threads that are resumed is further refined by the scheduler-locking\n\
7362 mode (see help set scheduler-locking)."),
7363 NULL,
7364 show_schedule_multiple,
7365 &setlist, &showlist);
7366
7367 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7368 Set mode of the step operation."), _("\
7369 Show mode of the step operation."), _("\
7370 When set, doing a step over a function without debug line information\n\
7371 will stop at the first instruction of that function. Otherwise, the\n\
7372 function is skipped and the step command stops at a different source line."),
7373 NULL,
7374 show_step_stop_if_no_debug,
7375 &setlist, &showlist);
7376
7377 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
7378 &can_use_displaced_stepping, _("\
7379 Set debugger's willingness to use displaced stepping."), _("\
7380 Show debugger's willingness to use displaced stepping."), _("\
7381 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7382 supported by the target architecture. If off, gdb will not use displaced\n\
7383 stepping to step over breakpoints, even if such is supported by the target\n\
7384 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7385 if the target architecture supports it and non-stop mode is active, but will not\n\
7386 use it in all-stop mode (see help set non-stop)."),
7387 NULL,
7388 show_can_use_displaced_stepping,
7389 &setlist, &showlist);
7390
7391 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7392 &exec_direction, _("Set direction of execution.\n\
7393 Options are 'forward' or 'reverse'."),
7394 _("Show direction of execution (forward/reverse)."),
7395 _("Tells gdb whether to execute forward or backward."),
7396 set_exec_direction_func, show_exec_direction_func,
7397 &setlist, &showlist);
7398
7399 /* Set/show detach-on-fork: user-settable mode. */
7400
7401 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7402 Set whether gdb will detach the child of a fork."), _("\
7403 Show whether gdb will detach the child of a fork."), _("\
7404 Tells gdb whether to detach the child of a fork."),
7405 NULL, NULL, &setlist, &showlist);
7406
7407 /* Set/show disable address space randomization mode. */
7408
7409 add_setshow_boolean_cmd ("disable-randomization", class_support,
7410 &disable_randomization, _("\
7411 Set disabling of debuggee's virtual address space randomization."), _("\
7412 Show disabling of debuggee's virtual address space randomization."), _("\
7413 When this mode is on (which is the default), randomization of the virtual\n\
7414 address space is disabled. Standalone programs run with the randomization\n\
7415 enabled by default on some platforms."),
7416 &set_disable_randomization,
7417 &show_disable_randomization,
7418 &setlist, &showlist);
7419
7420 /* ptid initializations */
7421 inferior_ptid = null_ptid;
7422 target_last_wait_ptid = minus_one_ptid;
7423
7424 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7425 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7426 observer_attach_thread_exit (infrun_thread_thread_exit);
7427 observer_attach_inferior_exit (infrun_inferior_exit);
7428
7429 /* Explicitly create without lookup, since that tries to create a
7430 value with a void typed value, and when we get here, gdbarch
7431 isn't initialized yet. At this point, we're quite sure there
7432 isn't another convenience variable of the same name. */
7433 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);
7434
7435 add_setshow_boolean_cmd ("observer", no_class,
7436 &observer_mode_1, _("\
7437 Set whether gdb controls the inferior in observer mode."), _("\
7438 Show whether gdb controls the inferior in observer mode."), _("\
7439 In observer mode, GDB can get data from the inferior, but not\n\
7440 affect its execution. Registers and memory may not be changed,\n\
7441 breakpoints may not be set, and the program cannot be interrupted\n\
7442 or signalled."),
7443 set_observer_mode,
7444 show_observer_mode,
7445 &setlist,
7446 &showlist);
7447 }