1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2012 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "gdb_string.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "exceptions.h"
28 #include "breakpoint.h"
29 #include "gdb_wait.h"
30 #include "gdbcore.h"
31 #include "gdbcmd.h"
32 #include "cli/cli-script.h"
33 #include "target.h"
34 #include "gdbthread.h"
35 #include "annotate.h"
36 #include "symfile.h"
37 #include "top.h"
38 #include <signal.h>
39 #include "inf-loop.h"
40 #include "regcache.h"
41 #include "value.h"
42 #include "observer.h"
43 #include "language.h"
44 #include "solib.h"
45 #include "main.h"
46 #include "dictionary.h"
47 #include "block.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55 #include "continuations.h"
56 #include "interps.h"
57 #include "skip.h"
58
59 /* Prototypes for local functions */
60
61 static void signals_info (char *, int);
62
63 static void handle_command (char *, int);
64
65 static void sig_print_info (enum target_signal);
66
67 static void sig_print_header (void);
68
69 static void resume_cleanups (void *);
70
71 static int hook_stop_stub (void *);
72
73 static int restore_selected_frame (void *);
74
75 static int follow_fork (void);
76
77 static void set_schedlock_func (char *args, int from_tty,
78 struct cmd_list_element *c);
79
80 static int currently_stepping (struct thread_info *tp);
81
82 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
83 void *data);
84
85 static void xdb_handle_command (char *args, int from_tty);
86
87 static int prepare_to_proceed (int);
88
89 static void print_exited_reason (int exitstatus);
90
91 static void print_signal_exited_reason (enum target_signal siggnal);
92
93 static void print_no_history_reason (void);
94
95 static void print_signal_received_reason (enum target_signal siggnal);
96
97 static void print_end_stepping_range_reason (void);
98
99 void _initialize_infrun (void);
100
101 void nullify_last_target_wait_ptid (void);
102
103 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
104
105 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
106
107 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
108
109 /* When set, stop the 'step' command if we enter a function which has
110 no line number information. The normal behavior is that we step
111    over such a function.  */
112 int step_stop_if_no_debug = 0;
113 static void
114 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
115 struct cmd_list_element *c, const char *value)
116 {
117 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
118 }
119
120 /* In asynchronous mode, but simulating synchronous execution. */
121
122 int sync_execution = 0;
123
124 /* wait_for_inferior and normal_stop use this to notify the user
125 when the inferior stopped in a different thread than it had been
126 running in. */
127
128 static ptid_t previous_inferior_ptid;
129
130 /* Default behavior is to detach newly forked processes (legacy). */
131 int detach_fork = 1;
132
133 int debug_displaced = 0;
134 static void
135 show_debug_displaced (struct ui_file *file, int from_tty,
136 struct cmd_list_element *c, const char *value)
137 {
138   fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
139 }
140
141 int debug_infrun = 0;
142 static void
143 show_debug_infrun (struct ui_file *file, int from_tty,
144 struct cmd_list_element *c, const char *value)
145 {
146 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
147 }
148
149
150 /* Support for disabling address space randomization. */
151
152 int disable_randomization = 1;
153
154 static void
155 show_disable_randomization (struct ui_file *file, int from_tty,
156 struct cmd_list_element *c, const char *value)
157 {
158 if (target_supports_disable_randomization ())
159 fprintf_filtered (file,
160 _("Disabling randomization of debuggee's "
161 "virtual address space is %s.\n"),
162 value);
163 else
164 fputs_filtered (_("Disabling randomization of debuggee's "
165 "virtual address space is unsupported on\n"
166 "this platform.\n"), file);
167 }
168
169 static void
170 set_disable_randomization (char *args, int from_tty,
171 struct cmd_list_element *c)
172 {
173 if (!target_supports_disable_randomization ())
174 error (_("Disabling randomization of debuggee's "
175 "virtual address space is unsupported on\n"
176 "this platform."));
177 }
178
179
180 /* If the program uses ELF-style shared libraries, then calls to
181 functions in shared libraries go through stubs, which live in a
182 table called the PLT (Procedure Linkage Table). The first time the
183 function is called, the stub sends control to the dynamic linker,
184 which looks up the function's real address, patches the stub so
185 that future calls will go directly to the function, and then passes
186 control to the function.
187
188 If we are stepping at the source level, we don't want to see any of
189 this --- we just want to skip over the stub and the dynamic linker.
190 The simple approach is to single-step until control leaves the
191 dynamic linker.
192
193 However, on some systems (e.g., Red Hat's 5.2 distribution) the
194 dynamic linker calls functions in the shared C library, so you
195 can't tell from the PC alone whether the dynamic linker is still
196 running. In this case, we use a step-resume breakpoint to get us
197 past the dynamic linker, as if we were using "next" to step over a
198 function call.
199
200 in_solib_dynsym_resolve_code() says whether we're in the dynamic
201 linker code or not. Normally, this means we single-step. However,
202    if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
203 address where we can place a step-resume breakpoint to get past the
204 linker's symbol resolution function.
205
206 in_solib_dynsym_resolve_code() can generally be implemented in a
207 pretty portable way, by comparing the PC against the address ranges
208 of the dynamic linker's sections.
209
210 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
211 it depends on internal details of the dynamic linker. It's usually
212 not too hard to figure out where to put a breakpoint, but it
213 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
214 sanity checking. If it can't figure things out, returning zero and
215 getting the (possibly confusing) stepping behavior is better than
216 signalling an error, which will obscure the change in the
217 inferior's state. */
218
219 /* This function returns TRUE if pc is the address of an instruction
220 that lies within the dynamic linker (such as the event hook, or the
221 dld itself).
222
223 This function must be used only when a dynamic linker event has
224 been caught, and the inferior is being stepped out of the hook, or
225 undefined results are guaranteed. */
226
227 #ifndef SOLIB_IN_DYNAMIC_LINKER
228 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
229 #endif
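/* EDITOR'S NOTE: the block below is an illustrative sketch only and is not
   part of infrun.c's real control flow (which lives in handle_inferior_event).
   It restates the policy described in the comments above: while stepping,
   ask whether the PC is inside the dynamic linker's resolver, and if the
   architecture can name the address the resolver returns to, run to that
   address with a step-resume breakpoint instead of single-stepping the
   linker.  It assumes the in_solib_dynsym_resolve_code and
   gdbarch_skip_solib_resolver interfaces those comments refer to.  */
#if 0
static CORE_ADDR
sketch_resolver_resume_address (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  /* Not in the resolver: nothing special to do.  */
  if (!in_solib_dynsym_resolve_code (pc))
    return 0;

  /* A non-zero answer means the caller can plant a step-resume breakpoint
     there and let the linker run at full speed; zero means fall back to
     single-stepping through the linker code.  */
  return gdbarch_skip_solib_resolver (gdbarch, pc);
}
#endif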
230
231 /* "Observer mode" is somewhat like a more extreme version of
232 non-stop, in which all GDB operations that might affect the
233 target's execution have been disabled. */
234
235 static int non_stop_1 = 0;
236
237 int observer_mode = 0;
238 static int observer_mode_1 = 0;
239
240 static void
241 set_observer_mode (char *args, int from_tty,
242 struct cmd_list_element *c)
243 {
244 extern int pagination_enabled;
245
246 if (target_has_execution)
247 {
248 observer_mode_1 = observer_mode;
249 error (_("Cannot change this setting while the inferior is running."));
250 }
251
252 observer_mode = observer_mode_1;
253
254 may_write_registers = !observer_mode;
255 may_write_memory = !observer_mode;
256 may_insert_breakpoints = !observer_mode;
257 may_insert_tracepoints = !observer_mode;
258 /* We can insert fast tracepoints in or out of observer mode,
259 but enable them if we're going into this mode. */
260 if (observer_mode)
261 may_insert_fast_tracepoints = 1;
262 may_stop = !observer_mode;
263 update_target_permissions ();
264
265 /* Going *into* observer mode we must force non-stop, then
266 going out we leave it that way. */
267 if (observer_mode)
268 {
269 target_async_permitted = 1;
270 pagination_enabled = 0;
271 non_stop = non_stop_1 = 1;
272 }
273
274 if (from_tty)
275 printf_filtered (_("Observer mode is now %s.\n"),
276 (observer_mode ? "on" : "off"));
277 }
278
279 static void
280 show_observer_mode (struct ui_file *file, int from_tty,
281 struct cmd_list_element *c, const char *value)
282 {
283 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
284 }
285
286 /* This updates the value of observer mode based on changes in
287 permissions. Note that we are deliberately ignoring the values of
288 may-write-registers and may-write-memory, since the user may have
289 reason to enable these during a session, for instance to turn on a
290 debugging-related global. */
291
292 void
293 update_observer_mode (void)
294 {
295 int newval;
296
297 newval = (!may_insert_breakpoints
298 && !may_insert_tracepoints
299 && may_insert_fast_tracepoints
300 && !may_stop
301 && non_stop);
302
303 /* Let the user know if things change. */
304 if (newval != observer_mode)
305 printf_filtered (_("Observer mode is now %s.\n"),
306 (newval ? "on" : "off"));
307
308 observer_mode = observer_mode_1 = newval;
309 }
310
311 /* Tables of how to react to signals; the user sets them. */
312
313 static unsigned char *signal_stop;
314 static unsigned char *signal_print;
315 static unsigned char *signal_program;
316
317 /* Table of signals that the target may silently handle.
318 This is automatically determined from the flags above,
319 and simply cached here. */
320 static unsigned char *signal_pass;
321
322 #define SET_SIGS(nsigs,sigs,flags) \
323 do { \
324 int signum = (nsigs); \
325 while (signum-- > 0) \
326 if ((sigs)[signum]) \
327 (flags)[signum] = 1; \
328 } while (0)
329
330 #define UNSET_SIGS(nsigs,sigs,flags) \
331 do { \
332 int signum = (nsigs); \
333 while (signum-- > 0) \
334 if ((sigs)[signum]) \
335 (flags)[signum] = 0; \
336 } while (0)
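/* EDITOR'S NOTE: the block below is an illustrative sketch only and is not
   part of infrun.c.  It shows how the SET_SIGS/UNSET_SIGS helpers above are
   meant to be applied to the per-signal tables; the hypothetical SIGS
   argument marks which signal numbers the user named (the real caller is
   this file's handle_command).  */
#if 0
static void
example_handle_stop_nopass (int nsigs, unsigned char *sigs)
{
  /* "handle <sig> stop" implies printing as well, so mark both flags
     for every selected signal...  */
  SET_SIGS (nsigs, sigs, signal_stop);
  SET_SIGS (nsigs, sigs, signal_print);

  /* ...while "handle <sig> nopass" clears the pass-to-program flag for
     the same set of signals.  */
  UNSET_SIGS (nsigs, sigs, signal_program);
}
#endif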
337
338 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
339 this function is to avoid exporting `signal_program'. */
340
341 void
342 update_signals_program_target (void)
343 {
344 target_program_signals ((int) TARGET_SIGNAL_LAST, signal_program);
345 }
346
347 /* Value to pass to target_resume() to cause all threads to resume. */
348
349 #define RESUME_ALL minus_one_ptid
350
351 /* Command list pointer for the "stop" placeholder. */
352
353 static struct cmd_list_element *stop_command;
354
355 /* Function inferior was in as of last step command. */
356
357 static struct symbol *step_start_function;
358
359 /* Nonzero if we want to give control to the user when we're notified
360 of shared library events by the dynamic linker. */
361 int stop_on_solib_events;
362 static void
363 show_stop_on_solib_events (struct ui_file *file, int from_tty,
364 struct cmd_list_element *c, const char *value)
365 {
366 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
367 value);
368 }
369
370 /* Nonzero means we are expecting a trace trap
371 and should stop the inferior and return silently when it happens. */
372
373 int stop_after_trap;
374
375 /* Save register contents here when executing a "finish" command or when
376 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
377 Thus this contains the return value from the called function (assuming
378 values are returned in a register). */
379
380 struct regcache *stop_registers;
381
382 /* Nonzero after stop if current stack frame should be printed. */
383
384 static int stop_print_frame;
385
386 /* This is a cached copy of the pid/waitstatus of the last event
387 returned by target_wait()/deprecated_target_wait_hook(). This
388 information is returned by get_last_target_status(). */
389 static ptid_t target_last_wait_ptid;
390 static struct target_waitstatus target_last_waitstatus;
391
392 static void context_switch (ptid_t ptid);
393
394 void init_thread_stepping_state (struct thread_info *tss);
395
396 void init_infwait_state (void);
397
398 static const char follow_fork_mode_child[] = "child";
399 static const char follow_fork_mode_parent[] = "parent";
400
401 static const char *const follow_fork_mode_kind_names[] = {
402 follow_fork_mode_child,
403 follow_fork_mode_parent,
404 NULL
405 };
406
407 static const char *follow_fork_mode_string = follow_fork_mode_parent;
408 static void
409 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
410 struct cmd_list_element *c, const char *value)
411 {
412 fprintf_filtered (file,
413 _("Debugger response to a program "
414 "call of fork or vfork is \"%s\".\n"),
415 value);
416 }
417 \f
418
419 /* Tell the target to follow the fork we're stopped at. Returns true
420 if the inferior should be resumed; false, if the target for some
421 reason decided it's best not to resume. */
422
423 static int
424 follow_fork (void)
425 {
426 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
427 int should_resume = 1;
428 struct thread_info *tp;
429
430 /* Copy user stepping state to the new inferior thread. FIXME: the
431 followed fork child thread should have a copy of most of the
432 parent thread structure's run control related fields, not just these.
433 Initialized to avoid "may be used uninitialized" warnings from gcc. */
434 struct breakpoint *step_resume_breakpoint = NULL;
435 struct breakpoint *exception_resume_breakpoint = NULL;
436 CORE_ADDR step_range_start = 0;
437 CORE_ADDR step_range_end = 0;
438 struct frame_id step_frame_id = { 0 };
439
440 if (!non_stop)
441 {
442 ptid_t wait_ptid;
443 struct target_waitstatus wait_status;
444
445 /* Get the last target status returned by target_wait(). */
446 get_last_target_status (&wait_ptid, &wait_status);
447
448 /* If not stopped at a fork event, then there's nothing else to
449 do. */
450 if (wait_status.kind != TARGET_WAITKIND_FORKED
451 && wait_status.kind != TARGET_WAITKIND_VFORKED)
452 return 1;
453
454 /* Check if we switched over from WAIT_PTID, since the event was
455 reported. */
456 if (!ptid_equal (wait_ptid, minus_one_ptid)
457 && !ptid_equal (inferior_ptid, wait_ptid))
458 {
459 /* We did. Switch back to WAIT_PTID thread, to tell the
460 target to follow it (in either direction). We'll
461 afterwards refuse to resume, and inform the user what
462 happened. */
463 switch_to_thread (wait_ptid);
464 should_resume = 0;
465 }
466 }
467
468 tp = inferior_thread ();
469
470 /* If there were any forks/vforks that were caught and are now to be
471 followed, then do so now. */
472 switch (tp->pending_follow.kind)
473 {
474 case TARGET_WAITKIND_FORKED:
475 case TARGET_WAITKIND_VFORKED:
476 {
477 ptid_t parent, child;
478
479 /* If the user did a next/step, etc, over a fork call,
480 preserve the stepping state in the fork child. */
481 if (follow_child && should_resume)
482 {
483 step_resume_breakpoint = clone_momentary_breakpoint
484 (tp->control.step_resume_breakpoint);
485 step_range_start = tp->control.step_range_start;
486 step_range_end = tp->control.step_range_end;
487 step_frame_id = tp->control.step_frame_id;
488 exception_resume_breakpoint
489 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
490
491 /* For now, delete the parent's sr breakpoint, otherwise,
492 parent/child sr breakpoints are considered duplicates,
493 and the child version will not be installed. Remove
494 this when the breakpoints module becomes aware of
495 inferiors and address spaces. */
496 delete_step_resume_breakpoint (tp);
497 tp->control.step_range_start = 0;
498 tp->control.step_range_end = 0;
499 tp->control.step_frame_id = null_frame_id;
500 delete_exception_resume_breakpoint (tp);
501 }
502
503 parent = inferior_ptid;
504 child = tp->pending_follow.value.related_pid;
505
506 /* Tell the target to do whatever is necessary to follow
507 either parent or child. */
508 if (target_follow_fork (follow_child))
509 {
510 /* Target refused to follow, or there's some other reason
511 we shouldn't resume. */
512 should_resume = 0;
513 }
514 else
515 {
516 /* This pending follow fork event is now handled, one way
517              or another.  The previously selected thread may be gone
518              from the lists by now, but if it is still around, we need
519 to clear the pending follow request. */
520 tp = find_thread_ptid (parent);
521 if (tp)
522 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
523
524 /* This makes sure we don't try to apply the "Switched
525 over from WAIT_PID" logic above. */
526 nullify_last_target_wait_ptid ();
527
528 /* If we followed the child, switch to it... */
529 if (follow_child)
530 {
531 switch_to_thread (child);
532
533 /* ... and preserve the stepping state, in case the
534 user was stepping over the fork call. */
535 if (should_resume)
536 {
537 tp = inferior_thread ();
538 tp->control.step_resume_breakpoint
539 = step_resume_breakpoint;
540 tp->control.step_range_start = step_range_start;
541 tp->control.step_range_end = step_range_end;
542 tp->control.step_frame_id = step_frame_id;
543 tp->control.exception_resume_breakpoint
544 = exception_resume_breakpoint;
545 }
546 else
547 {
548 /* If we get here, it was because we're trying to
549                      resume from a fork catchpoint, but the user
550 has switched threads away from the thread that
551 forked. In that case, the resume command
552 issued is most likely not applicable to the
553 child, so just warn, and refuse to resume. */
554 warning (_("Not resuming: switched threads "
555                                "before following fork child."));
556 }
557
558 /* Reset breakpoints in the child as appropriate. */
559 follow_inferior_reset_breakpoints ();
560 }
561 else
562 switch_to_thread (parent);
563 }
564 }
565 break;
566 case TARGET_WAITKIND_SPURIOUS:
567 /* Nothing to follow. */
568 break;
569 default:
570 internal_error (__FILE__, __LINE__,
571 "Unexpected pending_follow.kind %d\n",
572 tp->pending_follow.kind);
573 break;
574 }
575
576 return should_resume;
577 }
578
579 void
580 follow_inferior_reset_breakpoints (void)
581 {
582 struct thread_info *tp = inferior_thread ();
583
584 /* Was there a step_resume breakpoint? (There was if the user
585 did a "next" at the fork() call.) If so, explicitly reset its
586 thread number.
587
588 step_resumes are a form of bp that are made to be per-thread.
589 Since we created the step_resume bp when the parent process
590 was being debugged, and now are switching to the child process,
591 from the breakpoint package's viewpoint, that's a switch of
592 "threads". We must update the bp's notion of which thread
593 it is for, or it'll be ignored when it triggers. */
594
595 if (tp->control.step_resume_breakpoint)
596 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
597
598 if (tp->control.exception_resume_breakpoint)
599 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
600
601 /* Reinsert all breakpoints in the child. The user may have set
602 breakpoints after catching the fork, in which case those
603 were never set in the child, but only in the parent. This makes
604 sure the inserted breakpoints match the breakpoint list. */
605
606 breakpoint_re_set ();
607 insert_breakpoints ();
608 }
609
610 /* The child has exited or execed: resume threads of the parent the
611 user wanted to be executing. */
612
613 static int
614 proceed_after_vfork_done (struct thread_info *thread,
615 void *arg)
616 {
617 int pid = * (int *) arg;
618
619 if (ptid_get_pid (thread->ptid) == pid
620 && is_running (thread->ptid)
621 && !is_executing (thread->ptid)
622 && !thread->stop_requested
623 && thread->suspend.stop_signal == TARGET_SIGNAL_0)
624 {
625 if (debug_infrun)
626 fprintf_unfiltered (gdb_stdlog,
627 "infrun: resuming vfork parent thread %s\n",
628 target_pid_to_str (thread->ptid));
629
630 switch_to_thread (thread->ptid);
631 clear_proceed_status ();
632 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
633 }
634
635 return 0;
636 }
637
638 /* Called whenever we notice an exec or exit event, to handle
639 detaching or resuming a vfork parent. */
640
641 static void
642 handle_vfork_child_exec_or_exit (int exec)
643 {
644 struct inferior *inf = current_inferior ();
645
646 if (inf->vfork_parent)
647 {
648 int resume_parent = -1;
649
650 /* This exec or exit marks the end of the shared memory region
651 between the parent and the child. If the user wanted to
652 detach from the parent, now is the time. */
653
654 if (inf->vfork_parent->pending_detach)
655 {
656 struct thread_info *tp;
657 struct cleanup *old_chain;
658 struct program_space *pspace;
659 struct address_space *aspace;
660
661 /* follow-fork child, detach-on-fork on. */
662
663 old_chain = make_cleanup_restore_current_thread ();
664
665 /* We're letting loose of the parent. */
666 tp = any_live_thread_of_process (inf->vfork_parent->pid);
667 switch_to_thread (tp->ptid);
668
669 /* We're about to detach from the parent, which implicitly
670 removes breakpoints from its address space. There's a
671 catch here: we want to reuse the spaces for the child,
672 but, parent/child are still sharing the pspace at this
673 point, although the exec in reality makes the kernel give
674 the child a fresh set of new pages. The problem here is
675 that the breakpoints module being unaware of this, would
676 likely chose the child process to write to the parent
677 address space. Swapping the child temporarily away from
678 the spaces has the desired effect. Yes, this is "sort
679 of" a hack. */
680
681 pspace = inf->pspace;
682 aspace = inf->aspace;
683 inf->aspace = NULL;
684 inf->pspace = NULL;
685
686 if (debug_infrun || info_verbose)
687 {
688 target_terminal_ours ();
689
690 if (exec)
691 fprintf_filtered (gdb_stdlog,
692 "Detaching vfork parent process "
693 "%d after child exec.\n",
694 inf->vfork_parent->pid);
695 else
696 fprintf_filtered (gdb_stdlog,
697 "Detaching vfork parent process "
698 "%d after child exit.\n",
699 inf->vfork_parent->pid);
700 }
701
702 target_detach (NULL, 0);
703
704 /* Put it back. */
705 inf->pspace = pspace;
706 inf->aspace = aspace;
707
708 do_cleanups (old_chain);
709 }
710 else if (exec)
711 {
712 /* We're staying attached to the parent, so, really give the
713 child a new address space. */
714 inf->pspace = add_program_space (maybe_new_address_space ());
715 inf->aspace = inf->pspace->aspace;
716 inf->removable = 1;
717 set_current_program_space (inf->pspace);
718
719 resume_parent = inf->vfork_parent->pid;
720
721 /* Break the bonds. */
722 inf->vfork_parent->vfork_child = NULL;
723 }
724 else
725 {
726 struct cleanup *old_chain;
727 struct program_space *pspace;
728
729 /* If this is a vfork child exiting, then the pspace and
730 aspaces were shared with the parent. Since we're
731 reporting the process exit, we'll be mourning all that is
732 found in the address space, and switching to null_ptid,
733 preparing to start a new inferior. But, since we don't
734 want to clobber the parent's address/program spaces, we
735 go ahead and create a new one for this exiting
736 inferior. */
737
738 /* Switch to null_ptid, so that clone_program_space doesn't want
739 to read the selected frame of a dead process. */
740 old_chain = save_inferior_ptid ();
741 inferior_ptid = null_ptid;
742
743 /* This inferior is dead, so avoid giving the breakpoints
744 module the option to write through to it (cloning a
745 program space resets breakpoints). */
746 inf->aspace = NULL;
747 inf->pspace = NULL;
748 pspace = add_program_space (maybe_new_address_space ());
749 set_current_program_space (pspace);
750 inf->removable = 1;
751 inf->symfile_flags = SYMFILE_NO_READ;
752 clone_program_space (pspace, inf->vfork_parent->pspace);
753 inf->pspace = pspace;
754 inf->aspace = pspace->aspace;
755
756 /* Put back inferior_ptid. We'll continue mourning this
757 inferior. */
758 do_cleanups (old_chain);
759
760 resume_parent = inf->vfork_parent->pid;
761 /* Break the bonds. */
762 inf->vfork_parent->vfork_child = NULL;
763 }
764
765 inf->vfork_parent = NULL;
766
767 gdb_assert (current_program_space == inf->pspace);
768
769 if (non_stop && resume_parent != -1)
770 {
771 /* If the user wanted the parent to be running, let it go
772 free now. */
773 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
774
775 if (debug_infrun)
776 fprintf_unfiltered (gdb_stdlog,
777 "infrun: resuming vfork parent process %d\n",
778 resume_parent);
779
780 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
781
782 do_cleanups (old_chain);
783 }
784 }
785 }
786
787 /* Enum strings for "set|show follow-exec-mode".  */
788
789 static const char follow_exec_mode_new[] = "new";
790 static const char follow_exec_mode_same[] = "same";
791 static const char *const follow_exec_mode_names[] =
792 {
793 follow_exec_mode_new,
794 follow_exec_mode_same,
795 NULL,
796 };
797
798 static const char *follow_exec_mode_string = follow_exec_mode_same;
799 static void
800 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
801 struct cmd_list_element *c, const char *value)
802 {
803 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
804 }
805
806 /* EXECD_PATHNAME is assumed to be non-NULL. */
807
808 static void
809 follow_exec (ptid_t pid, char *execd_pathname)
810 {
811 struct thread_info *th = inferior_thread ();
812 struct inferior *inf = current_inferior ();
813
814 /* This is an exec event that we actually wish to pay attention to.
815 Refresh our symbol table to the newly exec'd program, remove any
816 momentary bp's, etc.
817
818 If there are breakpoints, they aren't really inserted now,
819 since the exec() transformed our inferior into a fresh set
820 of instructions.
821
822 We want to preserve symbolic breakpoints on the list, since
823 we have hopes that they can be reset after the new a.out's
824 symbol table is read.
825
826 However, any "raw" breakpoints must be removed from the list
827 (e.g., the solib bp's), since their address is probably invalid
828 now.
829
830 And, we DON'T want to call delete_breakpoints() here, since
831 that may write the bp's "shadow contents" (the instruction
832      value that was overwritten with a TRAP instruction).  Since
833 we now have a new a.out, those shadow contents aren't valid. */
834
835 mark_breakpoints_out ();
836
837 update_breakpoints_after_exec ();
838
839 /* If there was one, it's gone now. We cannot truly step-to-next
840 statement through an exec(). */
841 th->control.step_resume_breakpoint = NULL;
842 th->control.exception_resume_breakpoint = NULL;
843 th->control.step_range_start = 0;
844 th->control.step_range_end = 0;
845
846 /* The target reports the exec event to the main thread, even if
847 some other thread does the exec, and even if the main thread was
848 already stopped --- if debugging in non-stop mode, it's possible
849 the user had the main thread held stopped in the previous image
850 --- release it now. This is the same behavior as step-over-exec
851 with scheduler-locking on in all-stop mode. */
852 th->stop_requested = 0;
853
854 /* What is this a.out's name? */
855 printf_unfiltered (_("%s is executing new program: %s\n"),
856 target_pid_to_str (inferior_ptid),
857 execd_pathname);
858
859 /* We've followed the inferior through an exec. Therefore, the
860 inferior has essentially been killed & reborn. */
861
862 gdb_flush (gdb_stdout);
863
864 breakpoint_init_inferior (inf_execd);
865
866 if (gdb_sysroot && *gdb_sysroot)
867 {
868 char *name = alloca (strlen (gdb_sysroot)
869 + strlen (execd_pathname)
870 + 1);
871
872 strcpy (name, gdb_sysroot);
873 strcat (name, execd_pathname);
874 execd_pathname = name;
875 }
876
877 /* Reset the shared library package. This ensures that we get a
878 shlib event when the child reaches "_start", at which point the
879 dld will have had a chance to initialize the child. */
880 /* Also, loading a symbol file below may trigger symbol lookups, and
881 we don't want those to be satisfied by the libraries of the
882 previous incarnation of this process. */
883 no_shared_libraries (NULL, 0);
884
885 if (follow_exec_mode_string == follow_exec_mode_new)
886 {
887 struct program_space *pspace;
888
889 /* The user wants to keep the old inferior and program spaces
890 around. Create a new fresh one, and switch to it. */
891
892 inf = add_inferior (current_inferior ()->pid);
893 pspace = add_program_space (maybe_new_address_space ());
894 inf->pspace = pspace;
895 inf->aspace = pspace->aspace;
896
897 exit_inferior_num_silent (current_inferior ()->num);
898
899 set_current_inferior (inf);
900 set_current_program_space (pspace);
901 }
902
903 gdb_assert (current_program_space == inf->pspace);
904
905 /* That a.out is now the one to use. */
906 exec_file_attach (execd_pathname, 0);
907
908   /* SYMFILE_DEFER_BP_RESET is used here because the proper displacement for
909      a PIE (Position Independent Executable) main symbol file will only be
910      applied by solib_create_inferior_hook below; without it, breakpoint_re_set
911      would fail by inserting the breakpoints with a zero displacement.  */
912
913 symbol_file_add (execd_pathname,
914 (inf->symfile_flags
915 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
916 NULL, 0);
917
918 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
919 set_initial_language ();
920
921 #ifdef SOLIB_CREATE_INFERIOR_HOOK
922 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
923 #else
924 solib_create_inferior_hook (0);
925 #endif
926
927 jit_inferior_created_hook ();
928
929 breakpoint_re_set ();
930
931 /* Reinsert all breakpoints. (Those which were symbolic have
932 been reset to the proper address in the new a.out, thanks
933 to symbol_file_command...). */
934 insert_breakpoints ();
935
936 /* The next resume of this inferior should bring it to the shlib
937 startup breakpoints. (If the user had also set bp's on
938 "main" from the old (parent) process, then they'll auto-
939 matically get reset there in the new process.). */
940 }
941
942 /* Non-zero if we are just simulating a single-step.  This is needed
943 because we cannot remove the breakpoints in the inferior process
944 until after the `wait' in `wait_for_inferior'. */
945 static int singlestep_breakpoints_inserted_p = 0;
946
947 /* The thread we inserted single-step breakpoints for. */
948 static ptid_t singlestep_ptid;
949
950 /* PC when we started this single-step. */
951 static CORE_ADDR singlestep_pc;
952
953 /* If another thread hit the singlestep breakpoint, we save the original
954 thread here so that we can resume single-stepping it later. */
955 static ptid_t saved_singlestep_ptid;
956 static int stepping_past_singlestep_breakpoint;
957
958 /* If not equal to null_ptid, this means that after a step over a breakpoint
959    is finished, we need to switch to deferred_step_ptid, and step it.
960
961 The use case is when one thread has hit a breakpoint, and then the user
962 has switched to another thread and issued 'step'. We need to step over
963    the breakpoint in the thread which hit the breakpoint, but then continue
964    stepping the thread the user has selected.  */
965 static ptid_t deferred_step_ptid;
966 \f
967 /* Displaced stepping. */
968
969 /* In non-stop debugging mode, we must take special care to manage
970 breakpoints properly; in particular, the traditional strategy for
971 stepping a thread past a breakpoint it has hit is unsuitable.
972 'Displaced stepping' is a tactic for stepping one thread past a
973 breakpoint it has hit while ensuring that other threads running
974 concurrently will hit the breakpoint as they should.
975
976 The traditional way to step a thread T off a breakpoint in a
977 multi-threaded program in all-stop mode is as follows:
978
979 a0) Initially, all threads are stopped, and breakpoints are not
980 inserted.
981 a1) We single-step T, leaving breakpoints uninserted.
982 a2) We insert breakpoints, and resume all threads.
983
984 In non-stop debugging, however, this strategy is unsuitable: we
985 don't want to have to stop all threads in the system in order to
986 continue or step T past a breakpoint. Instead, we use displaced
987 stepping:
988
989 n0) Initially, T is stopped, other threads are running, and
990 breakpoints are inserted.
991 n1) We copy the instruction "under" the breakpoint to a separate
992 location, outside the main code stream, making any adjustments
993 to the instruction, register, and memory state as directed by
994 T's architecture.
995 n2) We single-step T over the instruction at its new location.
996 n3) We adjust the resulting register and memory state as directed
997 by T's architecture. This includes resetting T's PC to point
998 back into the main instruction stream.
999 n4) We resume T.
1000
1001 This approach depends on the following gdbarch methods:
1002
1003 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1004 indicate where to copy the instruction, and how much space must
1005 be reserved there. We use these in step n1.
1006
1007    - gdbarch_displaced_step_copy_insn copies an instruction to a new
1008 address, and makes any necessary adjustments to the instruction,
1009 register contents, and memory. We use this in step n1.
1010
1011 - gdbarch_displaced_step_fixup adjusts registers and memory after
1012      we have successfully single-stepped the instruction, to yield the
1013 same effect the instruction would have had if we had executed it
1014 at its original address. We use this in step n3.
1015
1016 - gdbarch_displaced_step_free_closure provides cleanup.
1017
1018 The gdbarch_displaced_step_copy_insn and
1019 gdbarch_displaced_step_fixup functions must be written so that
1020 copying an instruction with gdbarch_displaced_step_copy_insn,
1021 single-stepping across the copied instruction, and then applying
1022    gdbarch_displaced_step_fixup should have the same effects on the
1023 thread's memory and registers as stepping the instruction in place
1024 would have. Exactly which responsibilities fall to the copy and
1025 which fall to the fixup is up to the author of those functions.
1026
1027 See the comments in gdbarch.sh for details.
1028
1029 Note that displaced stepping and software single-step cannot
1030 currently be used in combination, although with some care I think
1031 they could be made to. Software single-step works by placing
1032 breakpoints on all possible subsequent instructions; if the
1033 displaced instruction is a PC-relative jump, those breakpoints
1034 could fall in very strange places --- on pages that aren't
1035 executable, or at addresses that are not proper instruction
1036 boundaries. (We do generally let other threads run while we wait
1037 to hit the software single-step breakpoint, and they might
1038 encounter such a corrupted instruction.) One way to work around
1039 this would be to have gdbarch_displaced_step_copy_insn fully
1040 simulate the effect of PC-relative instructions (and return NULL)
1041 on architectures that use software single-stepping.
1042
1043 In non-stop mode, we can have independent and simultaneous step
1044 requests, so more than one thread may need to simultaneously step
1045 over a breakpoint. The current implementation assumes there is
1046 only one scratch space per process. In this case, we have to
1047 serialize access to the scratch space. If thread A wants to step
1048 over a breakpoint, but we are currently waiting for some other
1049 thread to complete a displaced step, we leave thread A stopped and
1050 place it in the displaced_step_request_queue. Whenever a displaced
1051 step finishes, we pick the next thread in the queue and start a new
1052 displaced step operation on it. See displaced_step_prepare and
1053 displaced_step_fixup for details. */
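/* EDITOR'S NOTE: the block below is an illustrative sketch only, not part of
   infrun.c.  It condenses steps n1-n4 from the comment above into one
   hypothetical helper, using the gdbarch/regcache calls with the same
   signatures as displaced_step_prepare / displaced_step_fixup further down
   in this file.  Error handling, request queueing, the intervening
   target_wait, and the per-inferior bookkeeping kept in
   struct displaced_step_inferior_state are deliberately omitted.  */
#if 0
static void
displaced_step_sketch (ptid_t ptid)
{
  struct regcache *regcache = get_thread_regcache (ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  ULONGEST len = gdbarch_max_insn_length (gdbarch);
  gdb_byte *saved = xmalloc (len);
  struct displaced_step_closure *closure;

  /* n1: save the scratch area and copy the original instruction there,
     letting the architecture adjust it as needed.  */
  read_memory (copy, saved, len);
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
                                              regcache);

  /* n2: single-step the copy.  */
  regcache_write_pc (regcache, copy);
  target_resume (ptid, 1, TARGET_SIGNAL_0);
  /* ... here the real code waits for the step to report back ...  */

  /* n3: restore the scratch area, then fix up registers and memory so the
     net effect matches executing the instruction at its original address.  */
  write_memory (copy, saved, len);
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy, regcache);
  gdbarch_displaced_step_free_closure (gdbarch, closure);
  xfree (saved);

  /* n4: the caller would now resume the thread normally.  */
}
#endif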
1054
1055 struct displaced_step_request
1056 {
1057 ptid_t ptid;
1058 struct displaced_step_request *next;
1059 };
1060
1061 /* Per-inferior displaced stepping state. */
1062 struct displaced_step_inferior_state
1063 {
1064 /* Pointer to next in linked list. */
1065 struct displaced_step_inferior_state *next;
1066
1067 /* The process this displaced step state refers to. */
1068 int pid;
1069
1070 /* A queue of pending displaced stepping requests. One entry per
1071 thread that needs to do a displaced step. */
1072 struct displaced_step_request *step_request_queue;
1073
1074 /* If this is not null_ptid, this is the thread carrying out a
1075 displaced single-step in process PID. This thread's state will
1076 require fixing up once it has completed its step. */
1077 ptid_t step_ptid;
1078
1079 /* The architecture the thread had when we stepped it. */
1080 struct gdbarch *step_gdbarch;
1081
1082   /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1083 for post-step cleanup. */
1084 struct displaced_step_closure *step_closure;
1085
1086 /* The address of the original instruction, and the copy we
1087 made. */
1088 CORE_ADDR step_original, step_copy;
1089
1090 /* Saved contents of copy area. */
1091 gdb_byte *step_saved_copy;
1092 };
1093
1094 /* The list of states of processes involved in displaced stepping
1095 presently. */
1096 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1097
1098 /* Get the displaced stepping state of process PID. */
1099
1100 static struct displaced_step_inferior_state *
1101 get_displaced_stepping_state (int pid)
1102 {
1103 struct displaced_step_inferior_state *state;
1104
1105 for (state = displaced_step_inferior_states;
1106 state != NULL;
1107 state = state->next)
1108 if (state->pid == pid)
1109 return state;
1110
1111 return NULL;
1112 }
1113
1114 /* Add a new displaced stepping state for process PID to the displaced
1115    stepping state list, or return a pointer to the existing entry
1116    if one already exists.  Never returns NULL.  */
1117
1118 static struct displaced_step_inferior_state *
1119 add_displaced_stepping_state (int pid)
1120 {
1121 struct displaced_step_inferior_state *state;
1122
1123 for (state = displaced_step_inferior_states;
1124 state != NULL;
1125 state = state->next)
1126 if (state->pid == pid)
1127 return state;
1128
1129 state = xcalloc (1, sizeof (*state));
1130 state->pid = pid;
1131 state->next = displaced_step_inferior_states;
1132 displaced_step_inferior_states = state;
1133
1134 return state;
1135 }
1136
1137 /* If the inferior is displaced stepping, and ADDR equals the starting address
1138    of the copy area, return the corresponding displaced_step_closure.  Otherwise,
1139 return NULL. */
1140
1141 struct displaced_step_closure*
1142 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1143 {
1144 struct displaced_step_inferior_state *displaced
1145 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1146
1147   /* Check whether a displaced step is in progress and ADDR is the copy area.  */
1148 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1149 && (displaced->step_copy == addr))
1150 return displaced->step_closure;
1151
1152 return NULL;
1153 }
1154
1155 /* Remove the displaced stepping state of process PID. */
1156
1157 static void
1158 remove_displaced_stepping_state (int pid)
1159 {
1160 struct displaced_step_inferior_state *it, **prev_next_p;
1161
1162 gdb_assert (pid != 0);
1163
1164 it = displaced_step_inferior_states;
1165 prev_next_p = &displaced_step_inferior_states;
1166 while (it)
1167 {
1168 if (it->pid == pid)
1169 {
1170 *prev_next_p = it->next;
1171 xfree (it);
1172 return;
1173 }
1174
1175 prev_next_p = &it->next;
1176 it = *prev_next_p;
1177 }
1178 }
1179
1180 static void
1181 infrun_inferior_exit (struct inferior *inf)
1182 {
1183 remove_displaced_stepping_state (inf->pid);
1184 }
1185
1186 /* Enum strings for "set|show displaced-stepping". */
1187
1188 static const char can_use_displaced_stepping_auto[] = "auto";
1189 static const char can_use_displaced_stepping_on[] = "on";
1190 static const char can_use_displaced_stepping_off[] = "off";
1191 static const char *const can_use_displaced_stepping_enum[] =
1192 {
1193 can_use_displaced_stepping_auto,
1194 can_use_displaced_stepping_on,
1195 can_use_displaced_stepping_off,
1196 NULL,
1197 };
1198
1199 /* If ON, and the architecture supports it, GDB will use displaced
1200 stepping to step over breakpoints. If OFF, or if the architecture
1201 doesn't support it, GDB will instead use the traditional
1202 hold-and-step approach. If AUTO (which is the default), GDB will
1203 decide which technique to use to step over breakpoints depending on
1204 which of all-stop or non-stop mode is active --- displaced stepping
1205 in non-stop mode; hold-and-step in all-stop mode. */
1206
1207 static const char *can_use_displaced_stepping =
1208 can_use_displaced_stepping_auto;
1209
1210 static void
1211 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1212 struct cmd_list_element *c,
1213 const char *value)
1214 {
1215 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1216 fprintf_filtered (file,
1217 _("Debugger's willingness to use displaced stepping "
1218 "to step over breakpoints is %s (currently %s).\n"),
1219 value, non_stop ? "on" : "off");
1220 else
1221 fprintf_filtered (file,
1222 _("Debugger's willingness to use displaced stepping "
1223 "to step over breakpoints is %s.\n"), value);
1224 }
1225
1226 /* Return non-zero if displaced stepping can/should be used to step
1227 over breakpoints. */
1228
1229 static int
1230 use_displaced_stepping (struct gdbarch *gdbarch)
1231 {
1232 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1233 && non_stop)
1234 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1235 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1236 && !RECORD_IS_USED);
1237 }
1238
1239 /* Clean out any stray displaced stepping state. */
1240 static void
1241 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1242 {
1243 /* Indicate that there is no cleanup pending. */
1244 displaced->step_ptid = null_ptid;
1245
1246 if (displaced->step_closure)
1247 {
1248 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1249 displaced->step_closure);
1250 displaced->step_closure = NULL;
1251 }
1252 }
1253
1254 static void
1255 displaced_step_clear_cleanup (void *arg)
1256 {
1257 struct displaced_step_inferior_state *state = arg;
1258
1259 displaced_step_clear (state);
1260 }
1261
1262 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1263 void
1264 displaced_step_dump_bytes (struct ui_file *file,
1265 const gdb_byte *buf,
1266 size_t len)
1267 {
1268 int i;
1269
1270 for (i = 0; i < len; i++)
1271 fprintf_unfiltered (file, "%02x ", buf[i]);
1272 fputs_unfiltered ("\n", file);
1273 }
1274
1275 /* Prepare to single-step, using displaced stepping.
1276
1277 Note that we cannot use displaced stepping when we have a signal to
1278 deliver. If we have a signal to deliver and an instruction to step
1279 over, then after the step, there will be no indication from the
1280 target whether the thread entered a signal handler or ignored the
1281 signal and stepped over the instruction successfully --- both cases
1282 result in a simple SIGTRAP. In the first case we mustn't do a
1283 fixup, and in the second case we must --- but we can't tell which.
1284 Comments in the code for 'random signals' in handle_inferior_event
1285 explain how we handle this case instead.
1286
1287 Returns 1 if preparing was successful -- this thread is going to be
1288 stepped now; or 0 if displaced stepping this thread got queued. */
1289 static int
1290 displaced_step_prepare (ptid_t ptid)
1291 {
1292 struct cleanup *old_cleanups, *ignore_cleanups;
1293 struct regcache *regcache = get_thread_regcache (ptid);
1294 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1295 CORE_ADDR original, copy;
1296 ULONGEST len;
1297 struct displaced_step_closure *closure;
1298 struct displaced_step_inferior_state *displaced;
1299
1300 /* We should never reach this function if the architecture does not
1301 support displaced stepping. */
1302 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1303
1304 /* We have to displaced step one thread at a time, as we only have
1305 access to a single scratch space per inferior. */
1306
1307 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1308
1309 if (!ptid_equal (displaced->step_ptid, null_ptid))
1310 {
1311 /* Already waiting for a displaced step to finish. Defer this
1312 request and place in queue. */
1313 struct displaced_step_request *req, *new_req;
1314
1315 if (debug_displaced)
1316 fprintf_unfiltered (gdb_stdlog,
1317                             "displaced: deferring step of %s\n",
1318 target_pid_to_str (ptid));
1319
1320 new_req = xmalloc (sizeof (*new_req));
1321 new_req->ptid = ptid;
1322 new_req->next = NULL;
1323
1324 if (displaced->step_request_queue)
1325 {
1326 for (req = displaced->step_request_queue;
1327 req && req->next;
1328 req = req->next)
1329 ;
1330 req->next = new_req;
1331 }
1332 else
1333 displaced->step_request_queue = new_req;
1334
1335 return 0;
1336 }
1337 else
1338 {
1339 if (debug_displaced)
1340 fprintf_unfiltered (gdb_stdlog,
1341 "displaced: stepping %s now\n",
1342 target_pid_to_str (ptid));
1343 }
1344
1345 displaced_step_clear (displaced);
1346
1347 old_cleanups = save_inferior_ptid ();
1348 inferior_ptid = ptid;
1349
1350 original = regcache_read_pc (regcache);
1351
1352 copy = gdbarch_displaced_step_location (gdbarch);
1353 len = gdbarch_max_insn_length (gdbarch);
1354
1355 /* Save the original contents of the copy area. */
1356 displaced->step_saved_copy = xmalloc (len);
1357 ignore_cleanups = make_cleanup (free_current_contents,
1358 &displaced->step_saved_copy);
1359 read_memory (copy, displaced->step_saved_copy, len);
1360 if (debug_displaced)
1361 {
1362 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1363 paddress (gdbarch, copy));
1364 displaced_step_dump_bytes (gdb_stdlog,
1365 displaced->step_saved_copy,
1366 len);
1367 };
1368
1369 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1370 original, copy, regcache);
1371
1372 /* We don't support the fully-simulated case at present. */
1373 gdb_assert (closure);
1374
1375 /* Save the information we need to fix things up if the step
1376 succeeds. */
1377 displaced->step_ptid = ptid;
1378 displaced->step_gdbarch = gdbarch;
1379 displaced->step_closure = closure;
1380 displaced->step_original = original;
1381 displaced->step_copy = copy;
1382
1383 make_cleanup (displaced_step_clear_cleanup, displaced);
1384
1385 /* Resume execution at the copy. */
1386 regcache_write_pc (regcache, copy);
1387
1388 discard_cleanups (ignore_cleanups);
1389
1390 do_cleanups (old_cleanups);
1391
1392 if (debug_displaced)
1393 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1394 paddress (gdbarch, copy));
1395
1396 return 1;
1397 }
1398
1399 static void
1400 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1401 const gdb_byte *myaddr, int len)
1402 {
1403 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1404
1405 inferior_ptid = ptid;
1406 write_memory (memaddr, myaddr, len);
1407 do_cleanups (ptid_cleanup);
1408 }
1409
1410 /* Restore the contents of the copy area for thread PTID. */
1411
1412 static void
1413 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1414 ptid_t ptid)
1415 {
1416 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1417
1418 write_memory_ptid (ptid, displaced->step_copy,
1419 displaced->step_saved_copy, len);
1420 if (debug_displaced)
1421 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1422 target_pid_to_str (ptid),
1423 paddress (displaced->step_gdbarch,
1424 displaced->step_copy));
1425 }
1426
1427 static void
1428 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1429 {
1430 struct cleanup *old_cleanups;
1431 struct displaced_step_inferior_state *displaced
1432 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1433
1434 /* Was any thread of this process doing a displaced step? */
1435 if (displaced == NULL)
1436 return;
1437
1438 /* Was this event for the pid we displaced? */
1439 if (ptid_equal (displaced->step_ptid, null_ptid)
1440 || ! ptid_equal (displaced->step_ptid, event_ptid))
1441 return;
1442
1443 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1444
1445 displaced_step_restore (displaced, displaced->step_ptid);
1446
1447 /* Did the instruction complete successfully? */
1448 if (signal == TARGET_SIGNAL_TRAP)
1449 {
1450 /* Fix up the resulting state. */
1451 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1452 displaced->step_closure,
1453 displaced->step_original,
1454 displaced->step_copy,
1455 get_thread_regcache (displaced->step_ptid));
1456 }
1457 else
1458 {
1459 /* Since the instruction didn't complete, all we can do is
1460 relocate the PC. */
1461 struct regcache *regcache = get_thread_regcache (event_ptid);
1462 CORE_ADDR pc = regcache_read_pc (regcache);
1463
1464 pc = displaced->step_original + (pc - displaced->step_copy);
1465 regcache_write_pc (regcache, pc);
1466 }
1467
1468 do_cleanups (old_cleanups);
1469
1470 displaced->step_ptid = null_ptid;
1471
1472 /* Are there any pending displaced stepping requests? If so, run
1473 one now. Leave the state object around, since we're likely to
1474 need it again soon. */
1475 while (displaced->step_request_queue)
1476 {
1477 struct displaced_step_request *head;
1478 ptid_t ptid;
1479 struct regcache *regcache;
1480 struct gdbarch *gdbarch;
1481 CORE_ADDR actual_pc;
1482 struct address_space *aspace;
1483
1484 head = displaced->step_request_queue;
1485 ptid = head->ptid;
1486 displaced->step_request_queue = head->next;
1487 xfree (head);
1488
1489 context_switch (ptid);
1490
1491 regcache = get_thread_regcache (ptid);
1492 actual_pc = regcache_read_pc (regcache);
1493 aspace = get_regcache_aspace (regcache);
1494
1495 if (breakpoint_here_p (aspace, actual_pc))
1496 {
1497 if (debug_displaced)
1498 fprintf_unfiltered (gdb_stdlog,
1499 "displaced: stepping queued %s now\n",
1500 target_pid_to_str (ptid));
1501
1502 displaced_step_prepare (ptid);
1503
1504 gdbarch = get_regcache_arch (regcache);
1505
1506 if (debug_displaced)
1507 {
1508 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1509 gdb_byte buf[4];
1510
1511 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1512 paddress (gdbarch, actual_pc));
1513 read_memory (actual_pc, buf, sizeof (buf));
1514 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1515 }
1516
1517 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1518 displaced->step_closure))
1519 target_resume (ptid, 1, TARGET_SIGNAL_0);
1520 else
1521 target_resume (ptid, 0, TARGET_SIGNAL_0);
1522
1523 /* Done, we're stepping a thread. */
1524 break;
1525 }
1526 else
1527 {
1528 int step;
1529 struct thread_info *tp = inferior_thread ();
1530
1531 /* The breakpoint we were sitting under has since been
1532 removed. */
1533 tp->control.trap_expected = 0;
1534
1535 /* Go back to what we were trying to do. */
1536 step = currently_stepping (tp);
1537
1538 if (debug_displaced)
1539 fprintf_unfiltered (gdb_stdlog,
1540 "breakpoint is gone %s: step(%d)\n",
1541 target_pid_to_str (tp->ptid), step);
1542
1543 target_resume (ptid, step, TARGET_SIGNAL_0);
1544 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1545
1546 /* This request was discarded. See if there's any other
1547 thread waiting for its turn. */
1548 }
1549 }
1550 }
1551
1552 /* Update global variables holding ptids to hold NEW_PTID if they were
1553 holding OLD_PTID. */
1554 static void
1555 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1556 {
1557 struct displaced_step_request *it;
1558 struct displaced_step_inferior_state *displaced;
1559
1560 if (ptid_equal (inferior_ptid, old_ptid))
1561 inferior_ptid = new_ptid;
1562
1563 if (ptid_equal (singlestep_ptid, old_ptid))
1564 singlestep_ptid = new_ptid;
1565
1566 if (ptid_equal (deferred_step_ptid, old_ptid))
1567 deferred_step_ptid = new_ptid;
1568
1569 for (displaced = displaced_step_inferior_states;
1570 displaced;
1571 displaced = displaced->next)
1572 {
1573 if (ptid_equal (displaced->step_ptid, old_ptid))
1574 displaced->step_ptid = new_ptid;
1575
1576 for (it = displaced->step_request_queue; it; it = it->next)
1577 if (ptid_equal (it->ptid, old_ptid))
1578 it->ptid = new_ptid;
1579 }
1580 }
1581
1582 \f
1583 /* Resuming. */
1584
1585 /* Things to clean up if we QUIT out of resume (). */
1586 static void
1587 resume_cleanups (void *ignore)
1588 {
1589 normal_stop ();
1590 }
1591
1592 static const char schedlock_off[] = "off";
1593 static const char schedlock_on[] = "on";
1594 static const char schedlock_step[] = "step";
1595 static const char *const scheduler_enums[] = {
1596 schedlock_off,
1597 schedlock_on,
1598 schedlock_step,
1599 NULL
1600 };
1601 static const char *scheduler_mode = schedlock_off;
1602 static void
1603 show_scheduler_mode (struct ui_file *file, int from_tty,
1604 struct cmd_list_element *c, const char *value)
1605 {
1606 fprintf_filtered (file,
1607 _("Mode for locking scheduler "
1608 "during execution is \"%s\".\n"),
1609 value);
1610 }
1611
1612 static void
1613 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1614 {
1615 if (!target_can_lock_scheduler)
1616 {
1617 scheduler_mode = schedlock_off;
1618 error (_("Target '%s' cannot support this command."), target_shortname);
1619 }
1620 }
1621
1622 /* True if execution commands resume all threads of all processes by
1623 default; otherwise, resume only threads of the current inferior
1624 process. */
1625 int sched_multi = 0;
1626
1627 /* Try to set up for software single stepping over the specified location.
1628 Return 1 if target_resume() should use hardware single step.
1629
1630 GDBARCH the current gdbarch.
1631 PC the location to step over. */
1632
1633 static int
1634 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1635 {
1636 int hw_step = 1;
1637
1638 if (execution_direction == EXEC_FORWARD
1639 && gdbarch_software_single_step_p (gdbarch)
1640 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1641 {
1642 hw_step = 0;
1643 /* Do not pull these breakpoints until after a `wait' in
1644 `wait_for_inferior'. */
1645 singlestep_breakpoints_inserted_p = 1;
1646 singlestep_ptid = inferior_ptid;
1647 singlestep_pc = pc;
1648 }
1649 return hw_step;
1650 }
1651
1652 /* Return a ptid representing the set of threads that we will proceed,
1653 in the perspective of the user/frontend. We may actually resume
1654 fewer threads at first, e.g., if a thread is stopped at a
1655 breakpoint that needs stepping-off, but that should not be visible
1656 to the user/frontend, and neither should the frontend/user be
1657 allowed to proceed any of the threads that happen to be stopped for
1658 internal run control handling, if a previous command wanted them
1659 resumed. */
1660
1661 ptid_t
1662 user_visible_resume_ptid (int step)
1663 {
1664 /* By default, resume all threads of all processes. */
1665 ptid_t resume_ptid = RESUME_ALL;
1666
1667 /* Maybe resume only all threads of the current process. */
1668 if (!sched_multi && target_supports_multi_process ())
1669 {
1670 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1671 }
1672
1673 /* Maybe resume a single thread after all. */
1674 if (non_stop)
1675 {
1676 /* With non-stop mode on, threads are always handled
1677 individually. */
1678 resume_ptid = inferior_ptid;
1679 }
1680 else if ((scheduler_mode == schedlock_on)
1681 || (scheduler_mode == schedlock_step
1682 && (step || singlestep_breakpoints_inserted_p)))
1683 {
1684 /* User-settable 'scheduler' mode requires solo thread resume. */
1685 resume_ptid = inferior_ptid;
1686 }
1687
1688 return resume_ptid;
1689 }
1690
1691 /* Resume the inferior, but allow a QUIT. This is useful if the user
1692 wants to interrupt some lengthy single-stepping operation
1693 (for child processes, the SIGINT goes to the inferior, and so
1694 we get a SIGINT random_signal, but for remote debugging and perhaps
1695 other targets, that's not true).
1696
1697 STEP nonzero if we should step (zero to continue instead).
1698 SIG is the signal to give the inferior (zero for none). */
1699 void
1700 resume (int step, enum target_signal sig)
1701 {
1702 int should_resume = 1;
1703 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1704 struct regcache *regcache = get_current_regcache ();
1705 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1706 struct thread_info *tp = inferior_thread ();
1707 CORE_ADDR pc = regcache_read_pc (regcache);
1708 struct address_space *aspace = get_regcache_aspace (regcache);
1709
1710 QUIT;
1711
1712 if (current_inferior ()->waiting_for_vfork_done)
1713 {
1714 /* Don't try to single-step a vfork parent that is waiting for
1715 the child to get out of the shared memory region (by exec'ing
1716 or exiting). This is particularly important on software
1717 single-step archs, as the child process would trip on the
1718 software single step breakpoint inserted for the parent
1719 process. Since the parent will not actually execute any
1720 instruction until the child is out of the shared region (such
1721 are vfork's semantics), it is safe to simply continue it.
1722 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1723 the parent, and tell it to `keep_going', which automatically
1724 re-sets it to stepping. */
1725 if (debug_infrun)
1726 fprintf_unfiltered (gdb_stdlog,
1727 "infrun: resume : clear step\n");
1728 step = 0;
1729 }
1730
1731 if (debug_infrun)
1732 fprintf_unfiltered (gdb_stdlog,
1733 "infrun: resume (step=%d, signal=%d), "
1734 "trap_expected=%d, current thread [%s] at %s\n",
1735 step, sig, tp->control.trap_expected,
1736 target_pid_to_str (inferior_ptid),
1737 paddress (gdbarch, pc));
1738
1739 /* Normally, by the time we reach `resume', the breakpoints are either
1740 removed or inserted, as appropriate. The exception is if we're sitting
1741 at a permanent breakpoint; we need to step over it, but permanent
1742 breakpoints can't be removed. So we have to test for it here. */
1743 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1744 {
1745 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1746 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1747 else
1748 error (_("\
1749 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1750 how to step past a permanent breakpoint on this architecture. Try using\n\
1751 a command like `return' or `jump' to continue execution."));
1752 }
1753
1754 /* If enabled, step over breakpoints by executing a copy of the
1755 instruction at a different address.
1756
1757 We can't use displaced stepping when we have a signal to deliver;
1758 the comments for displaced_step_prepare explain why. The
1759 comments in the handle_inferior event for dealing with 'random
1760 signals' explain what we do instead.
1761
1762 We can't use displaced stepping when we are waiting for a vfork_done
1763 event; displaced stepping breaks the vfork child in the same way a
1764 software single-step breakpoint would. */
1765 if (use_displaced_stepping (gdbarch)
1766 && (tp->control.trap_expected
1767 || (step && gdbarch_software_single_step_p (gdbarch)))
1768 && sig == TARGET_SIGNAL_0
1769 && !current_inferior ()->waiting_for_vfork_done)
1770 {
1771 struct displaced_step_inferior_state *displaced;
1772
1773 if (!displaced_step_prepare (inferior_ptid))
1774 {
1775 /* Got placed in displaced stepping queue. Will be resumed
1776 later when all the currently queued displaced stepping
1777 requests finish. The thread is not executing at this point,
1778 and the call to set_executing will be made later. But we
1779 need to call set_running here, since from the frontend's point of view,
1780 the thread is running. */
1781 set_running (inferior_ptid, 1);
1782 discard_cleanups (old_cleanups);
1783 return;
1784 }
1785
1786 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1787 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1788 displaced->step_closure);
1789 }
1790
1791 /* Do we need to do it the hard way, w/temp breakpoints? */
1792 else if (step)
1793 step = maybe_software_singlestep (gdbarch, pc);
1794
1795 /* Currently, our software single-step implementation leads to different
1796 results than hardware single-stepping in one situation: when stepping
1797 into delivering a signal which has an associated signal handler,
1798 hardware single-step will stop at the first instruction of the handler,
1799 while software single-step will simply skip execution of the handler.
1800
1801 For now, this difference in behavior is accepted since there is no
1802 easy way to actually implement single-stepping into a signal handler
1803 without kernel support.
1804
1805 However, there is one scenario where this difference leads to follow-on
1806 problems: if we're stepping off a breakpoint by removing all breakpoints
1807 and then single-stepping. In this case, the software single-step
1808 behavior means that even if there is a *breakpoint* in the signal
1809 handler, GDB still would not stop.
1810
1811 Fortunately, we can at least fix this particular issue. We detect
1812 here the case where we are about to deliver a signal while software
1813 single-stepping with breakpoints removed. In this situation, we
1814 revert the decisions to remove all breakpoints and insert single-
1815 step breakpoints, and instead we install a step-resume breakpoint
1816 at the current address, deliver the signal without stepping, and
1817 once we arrive back at the step-resume breakpoint, actually step
1818 over the breakpoint we originally wanted to step over. */
1819 if (singlestep_breakpoints_inserted_p
1820 && tp->control.trap_expected && sig != TARGET_SIGNAL_0)
1821 {
1822 /* If we have nested signals or a pending signal is delivered
1823 immediately after a handler returns, we might already have
1824 a step-resume breakpoint set on the earlier handler. We cannot
1825 set another step-resume breakpoint; just continue on until the
1826 original breakpoint is hit. */
1827 if (tp->control.step_resume_breakpoint == NULL)
1828 {
1829 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1830 tp->step_after_step_resume_breakpoint = 1;
1831 }
1832
1833 remove_single_step_breakpoints ();
1834 singlestep_breakpoints_inserted_p = 0;
1835
1836 insert_breakpoints ();
1837 tp->control.trap_expected = 0;
1838 }
1839
1840 if (should_resume)
1841 {
1842 ptid_t resume_ptid;
1843
1844 /* If STEP is set, it's a request to use hardware stepping
1845 facilities. But in that case, we should never
1846 use singlestep breakpoint. */
1847 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1848
1849 /* Decide the set of threads to ask the target to resume. Start
1850 by assuming everything will be resumed, then narrow the set
1851 by applying increasingly restrictive conditions.
1852 resume_ptid = user_visible_resume_ptid (step);
1853
1854 /* Maybe resume a single thread after all. */
1855 if (singlestep_breakpoints_inserted_p
1856 && stepping_past_singlestep_breakpoint)
1857 {
1858 /* The situation here is as follows. In thread T1 we wanted to
1859 single-step. Lacking hardware single-stepping, we've
1860 set a breakpoint at the PC of the next instruction -- call it
1861 P. After resuming, we've hit that breakpoint in thread T2.
1862 Now we've removed the original breakpoint, inserted a breakpoint
1863 at P+1, and are trying to step to advance T2 past the breakpoint.
1864 We need to step only T2; if T1 is allowed to run freely,
1865 it can run past P, and if other threads are allowed to run,
1866 they can hit the breakpoint at P+1, and nested hits of single-step
1867 breakpoints are not something we'd want -- that's complicated
1868 to support, and has no value. */
1869 resume_ptid = inferior_ptid;
1870 }
1871 else if ((step || singlestep_breakpoints_inserted_p)
1872 && tp->control.trap_expected)
1873 {
1874 /* We're allowing a thread to run past a breakpoint it has
1875 hit, by single-stepping the thread with the breakpoint
1876 removed. In which case, we need to single-step only this
1877 thread, and keep others stopped, as they can miss this
1878 breakpoint if allowed to run.
1879
1880 The current code actually removes all breakpoints when
1881 doing this, not just the one being stepped over, so if we
1882 let other threads run, we can actually miss any
1883 breakpoint, not just the one at PC. */
1884 resume_ptid = inferior_ptid;
1885 }
1886
1887 if (gdbarch_cannot_step_breakpoint (gdbarch))
1888 {
1889 /* Most targets can step a breakpoint instruction, thus
1890 executing it normally. But if this one cannot, just
1891 continue and we will hit it anyway. */
1892 if (step && breakpoint_inserted_here_p (aspace, pc))
1893 step = 0;
1894 }
1895
1896 if (debug_displaced
1897 && use_displaced_stepping (gdbarch)
1898 && tp->control.trap_expected)
1899 {
1900 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1901 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1902 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1903 gdb_byte buf[4];
1904
1905 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1906 paddress (resume_gdbarch, actual_pc));
1907 read_memory (actual_pc, buf, sizeof (buf));
1908 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1909 }
1910
1911 /* Install inferior's terminal modes. */
1912 target_terminal_inferior ();
1913
1914 /* Avoid confusing the next resume, if the next stop/resume
1915 happens to apply to another thread. */
1916 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1917
1918 /* Advise target which signals may be handled silently. If we have
1919 removed breakpoints because we are stepping over one (which can
1920 happen only if we are not using displaced stepping), we need to
1921 receive all signals to avoid accidentally skipping a breakpoint
1922 during execution of a signal handler. */
1923 if ((step || singlestep_breakpoints_inserted_p)
1924 && tp->control.trap_expected
1925 && !use_displaced_stepping (gdbarch))
1926 target_pass_signals (0, NULL);
1927 else
1928 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
1929
1930 target_resume (resume_ptid, step, sig);
1931 }
1932
1933 discard_cleanups (old_cleanups);
1934 }
1935 \f
1936 /* Proceeding. */
1937
1938 /* Clear out all variables saying what to do when inferior is continued.
1939 First do this, then set the ones you want, then call `proceed'. */
1940
1941 static void
1942 clear_proceed_status_thread (struct thread_info *tp)
1943 {
1944 if (debug_infrun)
1945 fprintf_unfiltered (gdb_stdlog,
1946 "infrun: clear_proceed_status_thread (%s)\n",
1947 target_pid_to_str (tp->ptid));
1948
1949 tp->control.trap_expected = 0;
1950 tp->control.step_range_start = 0;
1951 tp->control.step_range_end = 0;
1952 tp->control.step_frame_id = null_frame_id;
1953 tp->control.step_stack_frame_id = null_frame_id;
1954 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
1955 tp->stop_requested = 0;
1956
1957 tp->control.stop_step = 0;
1958
1959 tp->control.proceed_to_finish = 0;
1960
1961 /* Discard any remaining commands or status from previous stop. */
1962 bpstat_clear (&tp->control.stop_bpstat);
1963 }
1964
1965 static int
1966 clear_proceed_status_callback (struct thread_info *tp, void *data)
1967 {
1968 if (is_exited (tp->ptid))
1969 return 0;
1970
1971 clear_proceed_status_thread (tp);
1972 return 0;
1973 }
1974
1975 void
1976 clear_proceed_status (void)
1977 {
1978 if (!non_stop)
1979 {
1980 /* In all-stop mode, delete the per-thread status of all
1981 threads, even if inferior_ptid is null_ptid, there may be
1982 threads on the list. E.g., we may be launching a new
1983 process, while selecting the executable. */
1984 iterate_over_threads (clear_proceed_status_callback, NULL);
1985 }
1986
1987 if (!ptid_equal (inferior_ptid, null_ptid))
1988 {
1989 struct inferior *inferior;
1990
1991 if (non_stop)
1992 {
1993 /* If in non-stop mode, only delete the per-thread status of
1994 the current thread. */
1995 clear_proceed_status_thread (inferior_thread ());
1996 }
1997
1998 inferior = current_inferior ();
1999 inferior->control.stop_soon = NO_STOP_QUIETLY;
2000 }
2001
2002 stop_after_trap = 0;
2003
2004 observer_notify_about_to_proceed ();
2005
2006 if (stop_registers)
2007 {
2008 regcache_xfree (stop_registers);
2009 stop_registers = NULL;
2010 }
2011 }
2012
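/* A minimal sketch of the calling convention described above
   (illustrative only; real callers typically also set per-thread
   step ranges or other control state between the two calls):

     clear_proceed_status ();
     ... set any step_... variables you need ...
     proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);

   That is: clear everything first, set what you want, then call
   proceed.  */
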
2013 /* Check the current thread against the thread that reported the most recent
2014 event. If a step-over is required, return TRUE and set the current thread
2015 to the old thread. Otherwise return FALSE.
2016
2017 This should be suitable for any targets that support threads. */
2018
2019 static int
2020 prepare_to_proceed (int step)
2021 {
2022 ptid_t wait_ptid;
2023 struct target_waitstatus wait_status;
2024 int schedlock_enabled;
2025
2026 /* With non-stop mode on, threads are always handled individually. */
2027 gdb_assert (! non_stop);
2028
2029 /* Get the last target status returned by target_wait(). */
2030 get_last_target_status (&wait_ptid, &wait_status);
2031
2032 /* Make sure we were stopped at a breakpoint. */
2033 if (wait_status.kind != TARGET_WAITKIND_STOPPED
2034 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
2035 && wait_status.value.sig != TARGET_SIGNAL_ILL
2036 && wait_status.value.sig != TARGET_SIGNAL_SEGV
2037 && wait_status.value.sig != TARGET_SIGNAL_EMT))
2038 {
2039 return 0;
2040 }
2041
2042 schedlock_enabled = (scheduler_mode == schedlock_on
2043 || (scheduler_mode == schedlock_step
2044 && step));
2045
2046 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
2047 if (schedlock_enabled)
2048 return 0;
2049
2050 /* Don't switch over if we're about to resume some other process
2051 other than WAIT_PTID's, and schedule-multiple is off. */
2052 if (!sched_multi
2053 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2054 return 0;
2055
2056 /* Maybe switch over to the WAIT_PTID thread. */
2057 if (!ptid_equal (wait_ptid, minus_one_ptid)
2058 && !ptid_equal (inferior_ptid, wait_ptid))
2059 {
2060 struct regcache *regcache = get_thread_regcache (wait_ptid);
2061
2062 if (breakpoint_here_p (get_regcache_aspace (regcache),
2063 regcache_read_pc (regcache)))
2064 {
2065 /* If stepping, remember current thread to switch back to. */
2066 if (step)
2067 deferred_step_ptid = inferior_ptid;
2068
2069 /* Switch back to the WAIT_PTID thread. */
2070 switch_to_thread (wait_ptid);
2071
2072 if (debug_infrun)
2073 fprintf_unfiltered (gdb_stdlog,
2074 "infrun: prepare_to_proceed (step=%d), "
2075 "switched to [%s]\n",
2076 step, target_pid_to_str (inferior_ptid));
2077
2078 /* We return 1 to indicate that there is a breakpoint here,
2079 so we need to step over it before continuing to avoid
2080 hitting it straight away. */
2081 return 1;
2082 }
2083 }
2084
2085 return 0;
2086 }
2087
2088 /* Basic routine for continuing the program in various fashions.
2089
2090 ADDR is the address to resume at, or -1 for resume where stopped.
2091 SIGGNAL is the signal to give it, or 0 for none,
2092 or -1 for act according to how it stopped.
2093 STEP is nonzero if we should trap after one instruction;
2094 -1 means return after that and print nothing.
2095 You should probably set various step_... variables
2096 before calling here, if you are stepping.
2097
2098 You should call clear_proceed_status before calling proceed. */
2099
2100 void
2101 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
2102 {
2103 struct regcache *regcache;
2104 struct gdbarch *gdbarch;
2105 struct thread_info *tp;
2106 CORE_ADDR pc;
2107 struct address_space *aspace;
2108 int oneproc = 0;
2109
2110 /* If we're stopped at a fork/vfork, follow the branch set by the
2111 "set follow-fork-mode" command; otherwise, we'll just proceed
2112 resuming the current thread. */
2113 if (!follow_fork ())
2114 {
2115 /* The target for some reason decided not to resume. */
2116 normal_stop ();
2117 if (target_can_async_p ())
2118 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2119 return;
2120 }
2121
2122 /* We'll update this if & when we switch to a new thread. */
2123 previous_inferior_ptid = inferior_ptid;
2124
2125 regcache = get_current_regcache ();
2126 gdbarch = get_regcache_arch (regcache);
2127 aspace = get_regcache_aspace (regcache);
2128 pc = regcache_read_pc (regcache);
2129
2130 if (step > 0)
2131 step_start_function = find_pc_function (pc);
2132 if (step < 0)
2133 stop_after_trap = 1;
2134
2135 if (addr == (CORE_ADDR) -1)
2136 {
2137 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2138 && execution_direction != EXEC_REVERSE)
2139 /* There is a breakpoint at the address we will resume at,
2140 step one instruction before inserting breakpoints so that
2141 we do not stop right away (and report a second hit at this
2142 breakpoint).
2143
2144 Note, we don't do this in reverse, because we won't
2145 actually be executing the breakpoint insn anyway.
2146 We'll be (un-)executing the previous instruction. */
2147
2148 oneproc = 1;
2149 else if (gdbarch_single_step_through_delay_p (gdbarch)
2150 && gdbarch_single_step_through_delay (gdbarch,
2151 get_current_frame ()))
2152 /* We stepped onto an instruction that needs to be stepped
2153 again before re-inserting the breakpoint; do so. */
2154 oneproc = 1;
2155 }
2156 else
2157 {
2158 regcache_write_pc (regcache, addr);
2159 }
2160
2161 if (debug_infrun)
2162 fprintf_unfiltered (gdb_stdlog,
2163 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
2164 paddress (gdbarch, addr), siggnal, step);
2165
2166 if (non_stop)
2167 /* In non-stop, each thread is handled individually. The context
2168 must already be set to the right thread here. */
2169 ;
2170 else
2171 {
2172 /* In a multi-threaded task we may select another thread and
2173 then continue or step.
2174
2175 But if the old thread was stopped at a breakpoint, it will
2176 immediately cause another breakpoint stop without any
2177 execution (i.e. it will report a breakpoint hit incorrectly).
2178 So we must step over it first.
2179
2180 prepare_to_proceed checks the current thread against the
2181 thread that reported the most recent event. If a step-over
2182 is required it returns TRUE and sets the current thread to
2183 the old thread. */
2184 if (prepare_to_proceed (step))
2185 oneproc = 1;
2186 }
2187
2188 /* prepare_to_proceed may change the current thread. */
2189 tp = inferior_thread ();
2190
2191 if (oneproc)
2192 {
2193 tp->control.trap_expected = 1;
2194 /* If displaced stepping is enabled, we can step over the
2195 breakpoint without hitting it, so leave all breakpoints
2196 inserted. Otherwise we need to disable all breakpoints, step
2197 one instruction, and then re-add them when that step is
2198 finished. */
2199 if (!use_displaced_stepping (gdbarch))
2200 remove_breakpoints ();
2201 }
2202
2203 /* We can insert breakpoints if we're not trying to step over one,
2204 or if we are stepping over one but we're using displaced stepping
2205 to do so. */
2206 if (! tp->control.trap_expected || use_displaced_stepping (gdbarch))
2207 insert_breakpoints ();
2208
2209 if (!non_stop)
2210 {
2211 /* Pass the last stop signal to the thread we're resuming,
2212 irrespective of whether the current thread is the thread that
2213 got the last event or not. This was historically GDB's
2214 behaviour before keeping a stop_signal per thread. */
2215
2216 struct thread_info *last_thread;
2217 ptid_t last_ptid;
2218 struct target_waitstatus last_status;
2219
2220 get_last_target_status (&last_ptid, &last_status);
2221 if (!ptid_equal (inferior_ptid, last_ptid)
2222 && !ptid_equal (last_ptid, null_ptid)
2223 && !ptid_equal (last_ptid, minus_one_ptid))
2224 {
2225 last_thread = find_thread_ptid (last_ptid);
2226 if (last_thread)
2227 {
2228 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2229 last_thread->suspend.stop_signal = TARGET_SIGNAL_0;
2230 }
2231 }
2232 }
2233
2234 if (siggnal != TARGET_SIGNAL_DEFAULT)
2235 tp->suspend.stop_signal = siggnal;
2236 /* If this signal should not be seen by the program,
2237 give it zero. Used for debugging signals. */
2238 else if (!signal_program[tp->suspend.stop_signal])
2239 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2240
2241 annotate_starting ();
2242
2243 /* Make sure that output from GDB appears before output from the
2244 inferior. */
2245 gdb_flush (gdb_stdout);
2246
2247 /* Refresh prev_pc value just prior to resuming. This used to be
2248 done in stop_stepping, however, setting prev_pc there did not handle
2249 scenarios such as inferior function calls or returning from
2250 a function via the return command. In those cases, the prev_pc
2251 value was not set properly for subsequent commands. The prev_pc value
2252 is used to initialize the starting line number in the ecs. With an
2253 invalid value, the gdb next command ends up stopping at the position
2254 represented by the next line table entry past our start position.
2255 On platforms that generate one line table entry per line, this
2256 is not a problem. However, on the ia64, the compiler generates
2257 extraneous line table entries that do not increase the line number.
2258 When we issue the gdb next command on the ia64 after an inferior call
2259 or a return command, we often end up a few instructions forward, still
2260 within the original line we started in.
2261
2262 An attempt was made to refresh the prev_pc at the same time the
2263 execution_control_state is initialized (for instance, just before
2264 waiting for an inferior event). But this approach did not work
2265 because of platforms that use ptrace, where the pc register cannot
2266 be read unless the inferior is stopped. At that point, we are not
2267 guaranteed the inferior is stopped and so the regcache_read_pc() call
2268 can fail. Setting the prev_pc value here ensures the value is updated
2269 correctly when the inferior is stopped. */
2270 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2271
2272 /* Fill in with reasonable starting values. */
2273 init_thread_stepping_state (tp);
2274
2275 /* Reset to normal state. */
2276 init_infwait_state ();
2277
2278 /* Resume inferior. */
2279 resume (oneproc || step || bpstat_should_step (), tp->suspend.stop_signal);
2280
2281 /* Wait for it to stop (if not standalone)
2282 and in any case decode why it stopped, and act accordingly. */
2283 /* Do this only if we are not using the event loop, or if the target
2284 does not support asynchronous execution. */
2285 if (!target_can_async_p ())
2286 {
2287 wait_for_inferior ();
2288 normal_stop ();
2289 }
2290 }
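
/* In outline, the tail end of proceed behaves as follows (a
   simplified restatement of the code above, not additional
   behavior):

     resume (oneproc || step || bpstat_should_step (),
             tp->suspend.stop_signal);
     if (!target_can_async_p ())
       {
         wait_for_inferior ();
         normal_stop ();
       }

   With an asynchronous-capable target, proceed returns immediately
   and the eventual stop is handled by fetch_inferior_event, which
   the event loop calls when activity is detected on the target's
   file descriptor.  */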
2291 \f
2292
2293 /* Start remote-debugging of a machine over a serial link. */
2294
2295 void
2296 start_remote (int from_tty)
2297 {
2298 struct inferior *inferior;
2299
2300 inferior = current_inferior ();
2301 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2302
2303 /* Always go on waiting for the target, regardless of the mode. */
2304 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2305 indicate to wait_for_inferior that a target should timeout if
2306 nothing is returned (instead of just blocking). Because of this,
2307 targets expecting an immediate response need to, internally, set
2308 things up so that the target_wait() is forced to eventually
2309 timeout. */
2310 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2311 differentiate to its caller what the state of the target is after
2312 the initial open has been performed. Here we're assuming that
2313 the target has stopped. It should be possible to eventually have
2314 target_open() return to the caller an indication that the target
2315 is currently running and GDB state should be set to the same as
2316 for an async run. */
2317 wait_for_inferior ();
2318
2319 /* Now that the inferior has stopped, do any bookkeeping like
2320 loading shared libraries. We want to do this before normal_stop,
2321 so that the displayed frame is up to date. */
2322 post_create_inferior (&current_target, from_tty);
2323
2324 normal_stop ();
2325 }
2326
2327 /* Initialize static vars when a new inferior begins. */
2328
2329 void
2330 init_wait_for_inferior (void)
2331 {
2332 /* These are meaningless until the first time through wait_for_inferior. */
2333
2334 breakpoint_init_inferior (inf_starting);
2335
2336 clear_proceed_status ();
2337
2338 stepping_past_singlestep_breakpoint = 0;
2339 deferred_step_ptid = null_ptid;
2340
2341 target_last_wait_ptid = minus_one_ptid;
2342
2343 previous_inferior_ptid = inferior_ptid;
2344 init_infwait_state ();
2345
2346 /* Discard any skipped inlined frames. */
2347 clear_inline_frame_state (minus_one_ptid);
2348 }
2349
2350 \f
2351 /* This enum encodes possible reasons for doing a target_wait, so that
2352 wfi can call target_wait in one place. (Ultimately the call will be
2353 moved out of the infinite loop entirely.) */
2354
2355 enum infwait_states
2356 {
2357 infwait_normal_state,
2358 infwait_thread_hop_state,
2359 infwait_step_watch_state,
2360 infwait_nonstep_watch_state
2361 };
2362
2363 /* The PTID we'll do a target_wait on. */
2364 ptid_t waiton_ptid;
2365
2366 /* Current inferior wait state. */
2367 enum infwait_states infwait_state;
2368
2369 /* Data to be passed around while handling an event. This data is
2370 discarded between events. */
2371 struct execution_control_state
2372 {
2373 ptid_t ptid;
2374 /* The thread that got the event, if this was a thread event; NULL
2375 otherwise. */
2376 struct thread_info *event_thread;
2377
2378 struct target_waitstatus ws;
2379 int random_signal;
2380 int stop_func_filled_in;
2381 CORE_ADDR stop_func_start;
2382 CORE_ADDR stop_func_end;
2383 const char *stop_func_name;
2384 int new_thread_event;
2385 int wait_some_more;
2386 };
2387
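/* An execution_control_state is filled in afresh for each target
   event.  A simplified sketch of the pattern used by the wait loops
   below (illustrative only):

     struct execution_control_state ecss;
     struct execution_control_state *ecs = &ecss;

     memset (ecs, 0, sizeof (*ecs));
     ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
     handle_inferior_event (ecs);
     if (!ecs->wait_some_more)
       normal_stop ();

   handle_inferior_event sets wait_some_more when it resumed the
   target and another event is expected.  */
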
2388 static void handle_inferior_event (struct execution_control_state *ecs);
2389
2390 static void handle_step_into_function (struct gdbarch *gdbarch,
2391 struct execution_control_state *ecs);
2392 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2393 struct execution_control_state *ecs);
2394 static void check_exception_resume (struct execution_control_state *,
2395 struct frame_info *, struct symbol *);
2396
2397 static void stop_stepping (struct execution_control_state *ecs);
2398 static void prepare_to_wait (struct execution_control_state *ecs);
2399 static void keep_going (struct execution_control_state *ecs);
2400
2401 /* Callback for iterate over threads. If the thread is stopped, but
2402 the user/frontend doesn't know about that yet, go through
2403 normal_stop, as if the thread had just stopped now. ARG points at
2404 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2405 ptid_is_pid(PTID) is true, applies to all threads of the process
2406 pointed at by PTID. Otherwise, apply only to the thread pointed to
2407 by PTID. */
2408
2409 static int
2410 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2411 {
2412 ptid_t ptid = * (ptid_t *) arg;
2413
2414 if ((ptid_equal (info->ptid, ptid)
2415 || ptid_equal (minus_one_ptid, ptid)
2416 || (ptid_is_pid (ptid)
2417 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2418 && is_running (info->ptid)
2419 && !is_executing (info->ptid))
2420 {
2421 struct cleanup *old_chain;
2422 struct execution_control_state ecss;
2423 struct execution_control_state *ecs = &ecss;
2424
2425 memset (ecs, 0, sizeof (*ecs));
2426
2427 old_chain = make_cleanup_restore_current_thread ();
2428
2429 switch_to_thread (info->ptid);
2430
2431 /* Go through handle_inferior_event/normal_stop, so we always
2432 have consistent output as if the stop event had been
2433 reported. */
2434 ecs->ptid = info->ptid;
2435 ecs->event_thread = find_thread_ptid (info->ptid);
2436 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2437 ecs->ws.value.sig = TARGET_SIGNAL_0;
2438
2439 handle_inferior_event (ecs);
2440
2441 if (!ecs->wait_some_more)
2442 {
2443 struct thread_info *tp;
2444
2445 normal_stop ();
2446
2447 /* Finish off the continuations. */
2448 tp = inferior_thread ();
2449 do_all_intermediate_continuations_thread (tp, 1);
2450 do_all_continuations_thread (tp, 1);
2451 }
2452
2453 do_cleanups (old_chain);
2454 }
2455
2456 return 0;
2457 }
2458
2459 /* This function is attached as a "thread_stop_requested" observer.
2460 Cleanup local state that assumed the PTID was to be resumed, and
2461 report the stop to the frontend. */
2462
2463 static void
2464 infrun_thread_stop_requested (ptid_t ptid)
2465 {
2466 struct displaced_step_inferior_state *displaced;
2467
2468 /* PTID was requested to stop. Remove it from the displaced
2469 stepping queue, so we don't try to resume it automatically. */
2470
2471 for (displaced = displaced_step_inferior_states;
2472 displaced;
2473 displaced = displaced->next)
2474 {
2475 struct displaced_step_request *it, **prev_next_p;
2476
2477 it = displaced->step_request_queue;
2478 prev_next_p = &displaced->step_request_queue;
2479 while (it)
2480 {
2481 if (ptid_match (it->ptid, ptid))
2482 {
2483 *prev_next_p = it->next;
2484 it->next = NULL;
2485 xfree (it);
2486 }
2487 else
2488 {
2489 prev_next_p = &it->next;
2490 }
2491
2492 it = *prev_next_p;
2493 }
2494 }
2495
2496 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2497 }
2498
2499 static void
2500 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2501 {
2502 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2503 nullify_last_target_wait_ptid ();
2504 }
2505
2506 /* Callback for iterate_over_threads. */
2507
2508 static int
2509 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2510 {
2511 if (is_exited (info->ptid))
2512 return 0;
2513
2514 delete_step_resume_breakpoint (info);
2515 delete_exception_resume_breakpoint (info);
2516 return 0;
2517 }
2518
2519 /* In all-stop, delete the step resume breakpoint of any thread that
2520 had one. In non-stop, delete the step resume breakpoint of the
2521 thread that just stopped. */
2522
2523 static void
2524 delete_step_thread_step_resume_breakpoint (void)
2525 {
2526 if (!target_has_execution
2527 || ptid_equal (inferior_ptid, null_ptid))
2528 /* If the inferior has exited, we have already deleted the step
2529 resume breakpoints out of GDB's lists. */
2530 return;
2531
2532 if (non_stop)
2533 {
2534 /* If in non-stop mode, only delete the step-resume or
2535 longjmp-resume breakpoint of the thread that just stopped
2536 stepping. */
2537 struct thread_info *tp = inferior_thread ();
2538
2539 delete_step_resume_breakpoint (tp);
2540 delete_exception_resume_breakpoint (tp);
2541 }
2542 else
2543 /* In all-stop mode, delete all step-resume and longjmp-resume
2544 breakpoints of any thread that had them. */
2545 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2546 }
2547
2548 /* A cleanup wrapper. */
2549
2550 static void
2551 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2552 {
2553 delete_step_thread_step_resume_breakpoint ();
2554 }
2555
2556 /* Pretty print the results of target_wait, for debugging purposes. */
2557
2558 static void
2559 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2560 const struct target_waitstatus *ws)
2561 {
2562 char *status_string = target_waitstatus_to_string (ws);
2563 struct ui_file *tmp_stream = mem_fileopen ();
2564 char *text;
2565
2566 /* The text is split over several lines because it was getting too long.
2567 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2568 output as a unit; we want only one timestamp printed if debug_timestamp
2569 is set. */
2570
2571 fprintf_unfiltered (tmp_stream,
2572 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2573 if (PIDGET (waiton_ptid) != -1)
2574 fprintf_unfiltered (tmp_stream,
2575 " [%s]", target_pid_to_str (waiton_ptid));
2576 fprintf_unfiltered (tmp_stream, ", status) =\n");
2577 fprintf_unfiltered (tmp_stream,
2578 "infrun: %d [%s],\n",
2579 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2580 fprintf_unfiltered (tmp_stream,
2581 "infrun: %s\n",
2582 status_string);
2583
2584 text = ui_file_xstrdup (tmp_stream, NULL);
2585
2586 /* This uses %s in part to handle %'s in the text, but also to avoid
2587 a gcc error: the format attribute requires a string literal. */
2588 fprintf_unfiltered (gdb_stdlog, "%s", text);
2589
2590 xfree (status_string);
2591 xfree (text);
2592 ui_file_delete (tmp_stream);
2593 }
2594
2595 /* Prepare and stabilize the inferior for detaching it. E.g.,
2596 detaching while a thread is displaced stepping is a recipe for
2597 crashing it, as nothing would readjust the PC out of the scratch
2598 pad. */
2599
2600 void
2601 prepare_for_detach (void)
2602 {
2603 struct inferior *inf = current_inferior ();
2604 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2605 struct cleanup *old_chain_1;
2606 struct displaced_step_inferior_state *displaced;
2607
2608 displaced = get_displaced_stepping_state (inf->pid);
2609
2610 /* Is any thread of this process displaced stepping? If not,
2611 there's nothing else to do. */
2612 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2613 return;
2614
2615 if (debug_infrun)
2616 fprintf_unfiltered (gdb_stdlog,
2617 "displaced-stepping in-process while detaching");
2618
2619 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2620 inf->detaching = 1;
2621
2622 while (!ptid_equal (displaced->step_ptid, null_ptid))
2623 {
2624 struct cleanup *old_chain_2;
2625 struct execution_control_state ecss;
2626 struct execution_control_state *ecs;
2627
2628 ecs = &ecss;
2629 memset (ecs, 0, sizeof (*ecs));
2630
2631 overlay_cache_invalid = 1;
2632
2633 if (deprecated_target_wait_hook)
2634 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2635 else
2636 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2637
2638 if (debug_infrun)
2639 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2640
2641 /* If an error happens while handling the event, propagate GDB's
2642 knowledge of the executing state to the frontend/user running
2643 state. */
2644 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2645 &minus_one_ptid);
2646
2647 /* In non-stop mode, each thread is handled individually.
2648 Switch early, so the global state is set correctly for this
2649 thread. */
2650 if (non_stop
2651 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2652 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2653 context_switch (ecs->ptid);
2654
2655 /* Now figure out what to do with the result of the wait. */
2656 handle_inferior_event (ecs);
2657
2658 /* No error, don't finish the state yet. */
2659 discard_cleanups (old_chain_2);
2660
2661 /* Breakpoints and watchpoints are not installed on the target
2662 at this point, and signals are passed directly to the
2663 inferior, so this must mean the process is gone. */
2664 if (!ecs->wait_some_more)
2665 {
2666 discard_cleanups (old_chain_1);
2667 error (_("Program exited while detaching"));
2668 }
2669 }
2670
2671 discard_cleanups (old_chain_1);
2672 }
2673
2674 /* Wait for control to return from inferior to debugger.
2675
2676 If inferior gets a signal, we may decide to start it up again
2677 instead of returning. That is why there is a loop in this function.
2678 When this function actually returns it means the inferior
2679 should be left stopped and GDB should read more commands. */
2680
2681 void
2682 wait_for_inferior (void)
2683 {
2684 struct cleanup *old_cleanups;
2685 struct execution_control_state ecss;
2686 struct execution_control_state *ecs;
2687
2688 if (debug_infrun)
2689 fprintf_unfiltered
2690 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2691
2692 old_cleanups =
2693 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2694
2695 ecs = &ecss;
2696 memset (ecs, 0, sizeof (*ecs));
2697
2698 while (1)
2699 {
2700 struct cleanup *old_chain;
2701
2702 overlay_cache_invalid = 1;
2703
2704 if (deprecated_target_wait_hook)
2705 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2706 else
2707 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2708
2709 if (debug_infrun)
2710 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2711
2712 /* If an error happens while handling the event, propagate GDB's
2713 knowledge of the executing state to the frontend/user running
2714 state. */
2715 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2716
2717 /* Now figure out what to do with the result of the wait. */
2718 handle_inferior_event (ecs);
2719
2720 /* No error, don't finish the state yet. */
2721 discard_cleanups (old_chain);
2722
2723 if (!ecs->wait_some_more)
2724 break;
2725 }
2726
2727 do_cleanups (old_cleanups);
2728 }
2729
2730 /* Asynchronous version of wait_for_inferior. It is called by the
2731 event loop whenever a change of state is detected on the file
2732 descriptor corresponding to the target. It can be called more than
2733 once to complete a single execution command. In such cases we need
2734 to keep the state in a global variable ECSS. If it is the last time
2735 that this function is called for a single execution command, then
2736 report to the user that the inferior has stopped, and do the
2737 necessary cleanups. */
2738
2739 void
2740 fetch_inferior_event (void *client_data)
2741 {
2742 struct execution_control_state ecss;
2743 struct execution_control_state *ecs = &ecss;
2744 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2745 struct cleanup *ts_old_chain;
2746 int was_sync = sync_execution;
2747 int cmd_done = 0;
2748
2749 memset (ecs, 0, sizeof (*ecs));
2750
2751 /* We're handling a live event, so make sure we're doing live
2752 debugging. If we're looking at traceframes while the target is
2753 running, we're going to need to get back to that mode after
2754 handling the event. */
2755 if (non_stop)
2756 {
2757 make_cleanup_restore_current_traceframe ();
2758 set_current_traceframe (-1);
2759 }
2760
2761 if (non_stop)
2762 /* In non-stop mode, the user/frontend should not notice a thread
2763 switch due to internal events. Make sure we revert to the
2764 user selected thread and frame after handling the event and
2765 running any breakpoint commands. */
2766 make_cleanup_restore_current_thread ();
2767
2768 overlay_cache_invalid = 1;
2769
2770 make_cleanup_restore_integer (&execution_direction);
2771 execution_direction = target_execution_direction ();
2772
2773 if (deprecated_target_wait_hook)
2774 ecs->ptid =
2775 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2776 else
2777 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2778
2779 if (debug_infrun)
2780 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2781
2782 if (non_stop
2783 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2784 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2785 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2786 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2787 /* In non-stop mode, each thread is handled individually. Switch
2788 early, so the global state is set correctly for this
2789 thread. */
2790 context_switch (ecs->ptid);
2791
2792 /* If an error happens while handling the event, propagate GDB's
2793 knowledge of the executing state to the frontend/user running
2794 state. */
2795 if (!non_stop)
2796 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2797 else
2798 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2799
2800 /* This cleanup gets executed before the make_cleanup_restore_current_thread
2801 above, so that it still applies to the thread which has thrown the exception. */
2802 make_bpstat_clear_actions_cleanup ();
2803
2804 /* Now figure out what to do with the result of the wait. */
2805 handle_inferior_event (ecs);
2806
2807 if (!ecs->wait_some_more)
2808 {
2809 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2810
2811 delete_step_thread_step_resume_breakpoint ();
2812
2813 /* We may not find an inferior if this was a process exit. */
2814 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2815 normal_stop ();
2816
2817 if (target_has_execution
2818 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2819 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2820 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2821 && ecs->event_thread->step_multi
2822 && ecs->event_thread->control.stop_step)
2823 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2824 else
2825 {
2826 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2827 cmd_done = 1;
2828 }
2829 }
2830
2831 /* No error, don't finish the thread states yet. */
2832 discard_cleanups (ts_old_chain);
2833
2834 /* Revert thread and frame. */
2835 do_cleanups (old_chain);
2836
2837 /* If the inferior was in sync execution mode, and now isn't,
2838 restore the prompt (a synchronous execution command has finished,
2839 and we're ready for input). */
2840 if (interpreter_async && was_sync && !sync_execution)
2841 display_gdb_prompt (0);
2842
2843 if (cmd_done
2844 && !was_sync
2845 && exec_done_display_p
2846 && (ptid_equal (inferior_ptid, null_ptid)
2847 || !is_running (inferior_ptid)))
2848 printf_unfiltered (_("completed.\n"));
2849 }
2850
2851 /* Record the frame and location we're currently stepping through. */
2852 void
2853 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2854 {
2855 struct thread_info *tp = inferior_thread ();
2856
2857 tp->control.step_frame_id = get_frame_id (frame);
2858 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2859
2860 tp->current_symtab = sal.symtab;
2861 tp->current_line = sal.line;
2862 }
2863
2864 /* Clear context switchable stepping state. */
2865
2866 void
2867 init_thread_stepping_state (struct thread_info *tss)
2868 {
2869 tss->stepping_over_breakpoint = 0;
2870 tss->step_after_step_resume_breakpoint = 0;
2871 }
2872
2873 /* Return the cached copy of the last pid/waitstatus returned by
2874 target_wait()/deprecated_target_wait_hook(). The data is actually
2875 cached by handle_inferior_event(), which gets called immediately
2876 after target_wait()/deprecated_target_wait_hook(). */
2877
2878 void
2879 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2880 {
2881 *ptidp = target_last_wait_ptid;
2882 *status = target_last_waitstatus;
2883 }
2884
2885 void
2886 nullify_last_target_wait_ptid (void)
2887 {
2888 target_last_wait_ptid = minus_one_ptid;
2889 }
2890
2891 /* Switch thread contexts. */
2892
2893 static void
2894 context_switch (ptid_t ptid)
2895 {
2896 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
2897 {
2898 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2899 target_pid_to_str (inferior_ptid));
2900 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2901 target_pid_to_str (ptid));
2902 }
2903
2904 switch_to_thread (ptid);
2905 }
2906
2907 static void
2908 adjust_pc_after_break (struct execution_control_state *ecs)
2909 {
2910 struct regcache *regcache;
2911 struct gdbarch *gdbarch;
2912 struct address_space *aspace;
2913 CORE_ADDR breakpoint_pc;
2914
2915 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2916 we aren't, just return.
2917
2918 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2919 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2920 implemented by software breakpoints should be handled through the normal
2921 breakpoint layer.
2922
2923 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2924 different signals (SIGILL or SIGEMT for instance), but it is less
2925 clear where the PC is pointing afterwards. It may not match
2926 gdbarch_decr_pc_after_break. I don't know any specific target that
2927 generates these signals at breakpoints (the code has been in GDB since at
2928 least 1992) so I can not guess how to handle them here.
2929
2930 In earlier versions of GDB, a target with
2931 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2932 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2933 target with both of these set in GDB history, and it seems unlikely to be
2934 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2935
2936 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2937 return;
2938
2939 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2940 return;
2941
2942 /* In reverse execution, when a breakpoint is hit, the instruction
2943 under it has already been de-executed. The reported PC always
2944 points at the breakpoint address, so adjusting it further would
2945 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2946 architecture:
2947
2948 B1 0x08000000 : INSN1
2949 B2 0x08000001 : INSN2
2950 0x08000002 : INSN3
2951 PC -> 0x08000003 : INSN4
2952
2953 Say you're stopped at 0x08000003 as above. Reverse continuing
2954 from that point should hit B2 as below. Reading the PC when the
2955 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2956 been de-executed already.
2957
2958 B1 0x08000000 : INSN1
2959 B2 PC -> 0x08000001 : INSN2
2960 0x08000002 : INSN3
2961 0x08000003 : INSN4
2962
2963 We can't apply the same logic as for forward execution, because
2964 we would wrongly adjust the PC to 0x08000000, since there's a
2965 breakpoint at PC - 1. We'd then report a hit on B1, although
2966 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2967 behaviour. */
2968 if (execution_direction == EXEC_REVERSE)
2969 return;
2970
2971 /* If this target does not decrement the PC after breakpoints, then
2972 we have nothing to do. */
2973 regcache = get_thread_regcache (ecs->ptid);
2974 gdbarch = get_regcache_arch (regcache);
2975 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2976 return;
2977
2978 aspace = get_regcache_aspace (regcache);
2979
2980 /* Find the location where (if we've hit a breakpoint) the
2981 breakpoint would be. */
2982 breakpoint_pc = regcache_read_pc (regcache)
2983 - gdbarch_decr_pc_after_break (gdbarch);
2984
2985 /* Check whether there actually is a software breakpoint inserted at
2986 that location.
2987
2988 If in non-stop mode, a race condition is possible where we've
2989 removed a breakpoint, but stop events for that breakpoint were
2990 already queued and arrive later. To suppress those spurious
2991 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2992 and retire them after a number of stop events are reported. */
2993 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2994 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2995 {
2996 struct cleanup *old_cleanups = NULL;
2997
2998 if (RECORD_IS_USED)
2999 old_cleanups = record_gdb_operation_disable_set ();
3000
3001 /* When using hardware single-step, a SIGTRAP is reported for both
3002 a completed single-step and a software breakpoint. Need to
3003 differentiate between the two, as the latter needs adjusting
3004 but the former does not.
3005
3006 The SIGTRAP can be due to a completed hardware single-step only if
3007 - we didn't insert software single-step breakpoints
3008 - the thread to be examined is still the current thread
3009 - this thread is currently being stepped
3010
3011 If any of these events did not occur, we must have stopped due
3012 to hitting a software breakpoint, and have to back up to the
3013 breakpoint address.
3014
3015 As a special case, we could have hardware single-stepped a
3016 software breakpoint. In this case (prev_pc == breakpoint_pc),
3017 we also need to back up to the breakpoint address. */
3018
3019 if (singlestep_breakpoints_inserted_p
3020 || !ptid_equal (ecs->ptid, inferior_ptid)
3021 || !currently_stepping (ecs->event_thread)
3022 || ecs->event_thread->prev_pc == breakpoint_pc)
3023 regcache_write_pc (regcache, breakpoint_pc);
3024
3025 if (RECORD_IS_USED)
3026 do_cleanups (old_cleanups);
3027 }
3028 }
3029
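/* For the more common forward-execution case, a concrete example on
   an architecture where gdbarch_decr_pc_after_break is 1 (x86-like;
   the addresses are made up for illustration):

     software breakpoint inserted at 0x08000000
     SIGTRAP reported with PC == 0x08000001

   breakpoint_pc is computed as 0x08000001 - 1 == 0x08000000; since a
   software breakpoint really is inserted there, the PC is written
   back to 0x08000000, so the stop is reported at the breakpoint
   address itself.  */
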
3030 void
3031 init_infwait_state (void)
3032 {
3033 waiton_ptid = pid_to_ptid (-1);
3034 infwait_state = infwait_normal_state;
3035 }
3036
3037 void
3038 error_is_running (void)
3039 {
3040 error (_("Cannot execute this command while "
3041 "the selected thread is running."));
3042 }
3043
3044 void
3045 ensure_not_running (void)
3046 {
3047 if (is_running (inferior_ptid))
3048 error_is_running ();
3049 }
3050
3051 static int
3052 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3053 {
3054 for (frame = get_prev_frame (frame);
3055 frame != NULL;
3056 frame = get_prev_frame (frame))
3057 {
3058 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3059 return 1;
3060 if (get_frame_type (frame) != INLINE_FRAME)
3061 break;
3062 }
3063
3064 return 0;
3065 }
3066
3067 /* Auxiliary function that handles syscall entry/return events.
3068 It returns 1 if the inferior should keep going (and GDB
3069 should ignore the event), or 0 if the event deserves to be
3070 processed. */
3071
3072 static int
3073 handle_syscall_event (struct execution_control_state *ecs)
3074 {
3075 struct regcache *regcache;
3076 struct gdbarch *gdbarch;
3077 int syscall_number;
3078
3079 if (!ptid_equal (ecs->ptid, inferior_ptid))
3080 context_switch (ecs->ptid);
3081
3082 regcache = get_thread_regcache (ecs->ptid);
3083 gdbarch = get_regcache_arch (regcache);
3084 syscall_number = ecs->ws.value.syscall_number;
3085 stop_pc = regcache_read_pc (regcache);
3086
3087 if (catch_syscall_enabled () > 0
3088 && catching_syscall_number (syscall_number) > 0)
3089 {
3090 if (debug_infrun)
3091 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3092 syscall_number);
3093
3094 ecs->event_thread->control.stop_bpstat
3095 = bpstat_stop_status (get_regcache_aspace (regcache),
3096 stop_pc, ecs->ptid, &ecs->ws);
3097 ecs->random_signal
3098 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3099
3100 if (!ecs->random_signal)
3101 {
3102 /* Catchpoint hit. */
3103 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3104 return 0;
3105 }
3106 }
3107
3108 /* If no catchpoint triggered for this, then keep going. */
3109 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3110 keep_going (ecs);
3111 return 1;
3112 }
3113
3114 /* Clear the supplied execution_control_state's stop_func_* fields. */
3115
3116 static void
3117 clear_stop_func (struct execution_control_state *ecs)
3118 {
3119 ecs->stop_func_filled_in = 0;
3120 ecs->stop_func_start = 0;
3121 ecs->stop_func_end = 0;
3122 ecs->stop_func_name = NULL;
3123 }
3124
3125 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3126
3127 static void
3128 fill_in_stop_func (struct gdbarch *gdbarch,
3129 struct execution_control_state *ecs)
3130 {
3131 if (!ecs->stop_func_filled_in)
3132 {
3133 /* Don't care about return value; stop_func_start and stop_func_name
3134 will both be 0 if it doesn't work. */
3135 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3136 &ecs->stop_func_start, &ecs->stop_func_end);
3137 ecs->stop_func_start
3138 += gdbarch_deprecated_function_start_offset (gdbarch);
3139
3140 ecs->stop_func_filled_in = 1;
3141 }
3142 }
3143
3144 /* Given an execution control state that has been freshly filled in
3145 by an event from the inferior, figure out what it means and take
3146 appropriate action. */
3147
3148 static void
3149 handle_inferior_event (struct execution_control_state *ecs)
3150 {
3151 struct frame_info *frame;
3152 struct gdbarch *gdbarch;
3153 int stopped_by_watchpoint;
3154 int stepped_after_stopped_by_watchpoint = 0;
3155 struct symtab_and_line stop_pc_sal;
3156 enum stop_kind stop_soon;
3157
3158 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3159 {
3160 /* We had an event in the inferior, but we are not interested in
3161 handling it at this level. The lower layers have already
3162 done what needs to be done, if anything.
3163
3164 One of the possible circumstances for this is when the
3165 inferior produces output for the console. The inferior has
3166 not stopped, and we are ignoring the event. Another possible
3167 circumstance is any event which the lower level knows will be
3168 reported multiple times without an intervening resume. */
3169 if (debug_infrun)
3170 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3171 prepare_to_wait (ecs);
3172 return;
3173 }
3174
3175 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3176 && target_can_async_p () && !sync_execution)
3177 {
3178 /* There were no unwaited-for children left in the target, but
3179 we're not synchronously waiting for events either. Just
3180 ignore. Otherwise, if we were running a synchronous
3181 execution command, we need to cancel it and give the user
3182 back the terminal. */
3183 if (debug_infrun)
3184 fprintf_unfiltered (gdb_stdlog,
3185 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3186 prepare_to_wait (ecs);
3187 return;
3188 }
3189
3190 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3191 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3192 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
3193 {
3194 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3195
3196 gdb_assert (inf);
3197 stop_soon = inf->control.stop_soon;
3198 }
3199 else
3200 stop_soon = NO_STOP_QUIETLY;
3201
3202 /* Cache the last pid/waitstatus. */
3203 target_last_wait_ptid = ecs->ptid;
3204 target_last_waitstatus = ecs->ws;
3205
3206 /* Always clear state belonging to the previous time we stopped. */
3207 stop_stack_dummy = STOP_NONE;
3208
3209 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3210 {
3211 /* No unwaited-for children left. IOW, all resumed children
3212 have exited. */
3213 if (debug_infrun)
3214 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3215
3216 stop_print_frame = 0;
3217 stop_stepping (ecs);
3218 return;
3219 }
3220
3221 /* If it's a new process, add it to the thread database. */
3222
3223 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
3224 && !ptid_equal (ecs->ptid, minus_one_ptid)
3225 && !in_thread_list (ecs->ptid));
3226
3227 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3228 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
3229 add_thread (ecs->ptid);
3230
3231 ecs->event_thread = find_thread_ptid (ecs->ptid);
3232
3233 /* Dependent on valid ECS->EVENT_THREAD. */
3234 adjust_pc_after_break (ecs);
3235
3236 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3237 reinit_frame_cache ();
3238
3239 breakpoint_retire_moribund ();
3240
3241 /* First, distinguish signals caused by the debugger from signals
3242 that have to do with the program's own actions. Note that
3243 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3244 on the operating system version. Here we detect when a SIGILL or
3245 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3246 something similar for SIGSEGV, since a SIGSEGV will be generated
3247 when we're trying to execute a breakpoint instruction on a
3248 non-executable stack. This happens for call dummy breakpoints
3249 for architectures like SPARC that place call dummies on the
3250 stack. */
3251 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3252 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3253 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3254 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3255 {
3256 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3257
3258 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3259 regcache_read_pc (regcache)))
3260 {
3261 if (debug_infrun)
3262 fprintf_unfiltered (gdb_stdlog,
3263 "infrun: Treating signal as SIGTRAP\n");
3264 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3265 }
3266 }
3267
3268 /* Mark the non-executing threads accordingly. In all-stop, all
3269 threads of all processes are stopped when we get any event
3270 reported. In non-stop mode, only the event thread stops. If
3271 we're handling a process exit in non-stop mode, there's nothing
3272 to do, as threads of the dead process are gone, and threads of
3273 any other process were left running. */
3274 if (!non_stop)
3275 set_executing (minus_one_ptid, 0);
3276 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3277 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3278 set_executing (ecs->ptid, 0);
3279
3280 switch (infwait_state)
3281 {
3282 case infwait_thread_hop_state:
3283 if (debug_infrun)
3284 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3285 break;
3286
3287 case infwait_normal_state:
3288 if (debug_infrun)
3289 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3290 break;
3291
3292 case infwait_step_watch_state:
3293 if (debug_infrun)
3294 fprintf_unfiltered (gdb_stdlog,
3295 "infrun: infwait_step_watch_state\n");
3296
3297 stepped_after_stopped_by_watchpoint = 1;
3298 break;
3299
3300 case infwait_nonstep_watch_state:
3301 if (debug_infrun)
3302 fprintf_unfiltered (gdb_stdlog,
3303 "infrun: infwait_nonstep_watch_state\n");
3304 insert_breakpoints ();
3305
3306 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3307 handle things like signals arriving and other things happening
3308 in combination correctly? */
3309 stepped_after_stopped_by_watchpoint = 1;
3310 break;
3311
3312 default:
3313 internal_error (__FILE__, __LINE__, _("bad switch"));
3314 }
3315
3316 infwait_state = infwait_normal_state;
3317 waiton_ptid = pid_to_ptid (-1);
3318
3319 switch (ecs->ws.kind)
3320 {
3321 case TARGET_WAITKIND_LOADED:
3322 if (debug_infrun)
3323 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3324 /* Ignore gracefully during startup of the inferior, as it might
3325 be the shell which has just loaded some objects, otherwise
3326 add the symbols for the newly loaded objects. Also ignore at
3327 the beginning of an attach or remote session; we will query
3328 the full list of libraries once the connection is
3329 established. */
3330 if (stop_soon == NO_STOP_QUIETLY)
3331 {
3332 struct regcache *regcache;
3333
3334 if (!ptid_equal (ecs->ptid, inferior_ptid))
3335 context_switch (ecs->ptid);
3336 regcache = get_thread_regcache (ecs->ptid);
3337
3338 handle_solib_event ();
3339
3340 ecs->event_thread->control.stop_bpstat
3341 = bpstat_stop_status (get_regcache_aspace (regcache),
3342 stop_pc, ecs->ptid, &ecs->ws);
3343 ecs->random_signal
3344 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3345
3346 if (!ecs->random_signal)
3347 {
3348 /* A catchpoint triggered. */
3349 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3350 goto process_event_stop_test;
3351 }
3352
3353 /* If requested, stop when the dynamic linker notifies
3354 gdb of events. This allows the user to get control
3355 and place breakpoints in initializer routines for
3356 dynamically loaded objects (among other things). */
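/* (The user requests these stops with "set stop-on-solib-events 1".)  */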
3357 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3358 if (stop_on_solib_events)
3359 {
3360 /* Make sure we print "Stopped due to solib-event" in
3361 normal_stop. */
3362 stop_print_frame = 1;
3363
3364 stop_stepping (ecs);
3365 return;
3366 }
3367 }
3368
3369 /* If we are skipping through a shell, or through shared library
3370 loading that we aren't interested in, resume the program. If
3371 we're running the program normally, also resume. But stop if
3372 we're attaching or setting up a remote connection. */
3373 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3374 {
3375 /* Loading of shared libraries might have changed breakpoint
3376 addresses. Make sure new breakpoints are inserted. */
3377 if (stop_soon == NO_STOP_QUIETLY
3378 && !breakpoints_always_inserted_mode ())
3379 insert_breakpoints ();
3380 resume (0, TARGET_SIGNAL_0);
3381 prepare_to_wait (ecs);
3382 return;
3383 }
3384
3385 break;
3386
3387 case TARGET_WAITKIND_SPURIOUS:
3388 if (debug_infrun)
3389 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3390 resume (0, TARGET_SIGNAL_0);
3391 prepare_to_wait (ecs);
3392 return;
3393
3394 case TARGET_WAITKIND_EXITED:
3395 if (debug_infrun)
3396 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3397 inferior_ptid = ecs->ptid;
3398 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3399 set_current_program_space (current_inferior ()->pspace);
3400 handle_vfork_child_exec_or_exit (0);
3401 target_terminal_ours (); /* Must do this before mourn anyway. */
3402 print_exited_reason (ecs->ws.value.integer);
3403
3404 /* Record the exit code in the convenience variable $_exitcode, so
3405 that the user can inspect this again later. */
3406 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3407 (LONGEST) ecs->ws.value.integer);
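/* For example, once the program has exited the user can inspect the
   value with "print $_exitcode" or test it in an expression.  */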
3408
3409 /* Also record this in the inferior itself. */
3410 current_inferior ()->has_exit_code = 1;
3411 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3412
3413 gdb_flush (gdb_stdout);
3414 target_mourn_inferior ();
3415 singlestep_breakpoints_inserted_p = 0;
3416 cancel_single_step_breakpoints ();
3417 stop_print_frame = 0;
3418 stop_stepping (ecs);
3419 return;
3420
3421 case TARGET_WAITKIND_SIGNALLED:
3422 if (debug_infrun)
3423 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3424 inferior_ptid = ecs->ptid;
3425 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3426 set_current_program_space (current_inferior ()->pspace);
3427 handle_vfork_child_exec_or_exit (0);
3428 stop_print_frame = 0;
3429 target_terminal_ours (); /* Must do this before mourn anyway. */
3430
3431 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3432 reach here unless the inferior is dead. However, for years
3433 target_kill() was called here, which hints that fatal signals aren't
3434 really fatal on some systems. If that's true, then some changes
3435 may be needed. */
3436 target_mourn_inferior ();
3437
3438 print_signal_exited_reason (ecs->ws.value.sig);
3439 singlestep_breakpoints_inserted_p = 0;
3440 cancel_single_step_breakpoints ();
3441 stop_stepping (ecs);
3442 return;
3443
3444 /* The following are the only cases in which we keep going;
3445 the above cases end in a continue or goto. */
3446 case TARGET_WAITKIND_FORKED:
3447 case TARGET_WAITKIND_VFORKED:
3448 if (debug_infrun)
3449 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3450
3451 /* Check whether the inferior is displaced stepping. */
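/* (Displaced stepping means stepping over a breakpoint by executing a
   copy of the displaced instruction out of a scratch pad; the user
   controls its use with "set displaced-stepping".)  */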
3452 {
3453 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3454 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3455 struct displaced_step_inferior_state *displaced
3456 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3457
3458 /* If displaced stepping is in use, and thread ecs->ptid is in
3459 the middle of a displaced step.  */
3460 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3461 {
3462 struct inferior *parent_inf
3463 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3464 struct regcache *child_regcache;
3465 CORE_ADDR parent_pc;
3466
3467 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3468 indicating that the displaced stepping of the syscall instruction
3469 has been done. Perform cleanup for parent process here. Note
3470 that this operation also cleans up the child process for vfork,
3471 because their pages are shared. */
3472 displaced_step_fixup (ecs->ptid, TARGET_SIGNAL_TRAP);
3473
3474 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3475 {
3476 /* Restore scratch pad for child process. */
3477 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3478 }
3479
3480 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3481 the child's PC is also within the scratchpad. Set the child's PC
3482 to the parent's PC value, which has already been fixed up.
3483 FIXME: we use the parent's aspace here, although we're touching
3484 the child, because the child hasn't been added to the inferior
3485 list yet at this point. */
3486
3487 child_regcache
3488 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3489 gdbarch,
3490 parent_inf->aspace);
3491 /* Read PC value of parent process. */
3492 parent_pc = regcache_read_pc (regcache);
3493
3494 if (debug_displaced)
3495 fprintf_unfiltered (gdb_stdlog,
3496 "displaced: write child pc from %s to %s\n",
3497 paddress (gdbarch,
3498 regcache_read_pc (child_regcache)),
3499 paddress (gdbarch, parent_pc));
3500
3501 regcache_write_pc (child_regcache, parent_pc);
3502 }
3503 }
3504
3505 if (!ptid_equal (ecs->ptid, inferior_ptid))
3506 {
3507 context_switch (ecs->ptid);
3508 reinit_frame_cache ();
3509 }
3510
3511 /* Immediately detach breakpoints from the child before there's
3512 any chance of letting the user delete breakpoints from the
3513 breakpoint lists. If we don't do this early, it's easy to
3514 leave leftover traps in the child, viz: "break foo; catch
3515 fork; c; <fork>; del; c; <child calls foo>". We only follow
3516 the fork on the last `continue', and by that time the
3517 breakpoint at "foo" is long gone from the breakpoint table.
3518 If we vforked, then we don't need to unpatch here, since both
3519 parent and child are sharing the same memory pages; we'll
3520 need to unpatch at follow/detach time instead to be certain
3521 that new breakpoints added between catchpoint hit time and
3522 vfork follow are detached. */
3523 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3524 {
3525 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3526
3527 /* This won't actually modify the breakpoint list, but will
3528 physically remove the breakpoints from the child. */
3529 detach_breakpoints (child_pid);
3530 }
3531
3532 if (singlestep_breakpoints_inserted_p)
3533 {
3534 /* Pull the single step breakpoints out of the target. */
3535 remove_single_step_breakpoints ();
3536 singlestep_breakpoints_inserted_p = 0;
3537 }
3538
3539 /* In case the event is caught by a catchpoint, remember that
3540 the event is to be followed at the next resume of the thread,
3541 and not immediately. */
3542 ecs->event_thread->pending_follow = ecs->ws;
3543
3544 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3545
3546 ecs->event_thread->control.stop_bpstat
3547 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3548 stop_pc, ecs->ptid, &ecs->ws);
3549
3550 /* Note that we're interested in knowing whether the bpstat actually
3551 causes a stop, not just whether it may explain the signal.
3552 Software watchpoints, for example, always appear in the
3553 bpstat. */
3554 ecs->random_signal
3555 = !bpstat_causes_stop (ecs->event_thread->control.stop_bpstat);
3556
3557 /* If no catchpoint triggered for this, then keep going. */
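/* (Fork catchpoints are the ones created with "catch fork" and
   "catch vfork".)  */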
3558 if (ecs->random_signal)
3559 {
3560 ptid_t parent;
3561 ptid_t child;
3562 int should_resume;
3563 int follow_child
3564 = (follow_fork_mode_string == follow_fork_mode_child);
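/* (follow_fork_mode_string reflects the user's "set follow-fork-mode"
   choice, and detach_fork the "set detach-on-fork" setting.)  */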
3565
3566 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3567
3568 should_resume = follow_fork ();
3569
3570 parent = ecs->ptid;
3571 child = ecs->ws.value.related_pid;
3572
3573 /* In non-stop mode, also resume the other branch. */
3574 if (non_stop && !detach_fork)
3575 {
3576 if (follow_child)
3577 switch_to_thread (parent);
3578 else
3579 switch_to_thread (child);
3580
3581 ecs->event_thread = inferior_thread ();
3582 ecs->ptid = inferior_ptid;
3583 keep_going (ecs);
3584 }
3585
3586 if (follow_child)
3587 switch_to_thread (child);
3588 else
3589 switch_to_thread (parent);
3590
3591 ecs->event_thread = inferior_thread ();
3592 ecs->ptid = inferior_ptid;
3593
3594 if (should_resume)
3595 keep_going (ecs);
3596 else
3597 stop_stepping (ecs);
3598 return;
3599 }
3600 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3601 goto process_event_stop_test;
3602
3603 case TARGET_WAITKIND_VFORK_DONE:
3604 /* Done with the shared memory region. Re-insert breakpoints in
3605 the parent, and keep going. */
3606
3607 if (debug_infrun)
3608 fprintf_unfiltered (gdb_stdlog,
3609 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3610
3611 if (!ptid_equal (ecs->ptid, inferior_ptid))
3612 context_switch (ecs->ptid);
3613
3614 current_inferior ()->waiting_for_vfork_done = 0;
3615 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3616 /* This also takes care of reinserting breakpoints in the
3617 previously locked inferior. */
3618 keep_going (ecs);
3619 return;
3620
3621 case TARGET_WAITKIND_EXECD:
3622 if (debug_infrun)
3623 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3624
3625 if (!ptid_equal (ecs->ptid, inferior_ptid))
3626 {
3627 context_switch (ecs->ptid);
3628 reinit_frame_cache ();
3629 }
3630
3631 singlestep_breakpoints_inserted_p = 0;
3632 cancel_single_step_breakpoints ();
3633
3634 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3635
3636 /* Do whatever is necessary to the parent branch of the vfork. */
3637 handle_vfork_child_exec_or_exit (1);
3638
3639 /* This causes the eventpoints and symbol table to be reset.
3640 Must do this now, before trying to determine whether to
3641 stop. */
3642 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3643
3644 ecs->event_thread->control.stop_bpstat
3645 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3646 stop_pc, ecs->ptid, &ecs->ws);
3647 ecs->random_signal
3648 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3649
3650 /* Note that this may be referenced from inside
3651 bpstat_stop_status above, through inferior_has_execd. */
3652 xfree (ecs->ws.value.execd_pathname);
3653 ecs->ws.value.execd_pathname = NULL;
3654
3655 /* If no catchpoint triggered for this, then keep going. */
3656 if (ecs->random_signal)
3657 {
3658 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3659 keep_going (ecs);
3660 return;
3661 }
3662 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3663 goto process_event_stop_test;
3664
3665 /* Be careful not to try to gather much state about a thread
3666 that's in a syscall. It's frequently a losing proposition. */
3667 case TARGET_WAITKIND_SYSCALL_ENTRY:
3668 if (debug_infrun)
3669 fprintf_unfiltered (gdb_stdlog,
3670 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3671 /* Getting the current syscall number. */
3672 if (handle_syscall_event (ecs) != 0)
3673 return;
3674 goto process_event_stop_test;
3675
3676 /* Before examining the threads further, step this thread to
3677 get it entirely out of the syscall. (We get notice of the
3678 event when the thread is just on the verge of exiting a
3679 syscall. Stepping one instruction seems to get it back
3680 into user code.) */
3681 case TARGET_WAITKIND_SYSCALL_RETURN:
3682 if (debug_infrun)
3683 fprintf_unfiltered (gdb_stdlog,
3684 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3685 if (handle_syscall_event (ecs) != 0)
3686 return;
3687 goto process_event_stop_test;
3688
3689 case TARGET_WAITKIND_STOPPED:
3690 if (debug_infrun)
3691 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3692 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3693 break;
3694
3695 case TARGET_WAITKIND_NO_HISTORY:
3696 if (debug_infrun)
3697 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3698 /* Reverse execution: target ran out of history info. */
3699 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3700 print_no_history_reason ();
3701 stop_stepping (ecs);
3702 return;
3703 }
3704
3705 if (ecs->new_thread_event)
3706 {
3707 if (non_stop)
3708 /* Non-stop assumes that the target handles adding new threads
3709 to the thread list. */
3710 internal_error (__FILE__, __LINE__,
3711 "targets should add new threads to the thread "
3712 "list themselves in non-stop mode.");
3713
3714 /* We may want to consider not doing a resume here in order to
3715 give the user a chance to play with the new thread. It might
3716 be good to make that a user-settable option. */
3717
3718 /* At this point, all threads are stopped (happens automatically
3719 in either the OS or the native code). Therefore we need to
3720 continue all threads in order to make progress. */
3721
3722 if (!ptid_equal (ecs->ptid, inferior_ptid))
3723 context_switch (ecs->ptid);
3724 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3725 prepare_to_wait (ecs);
3726 return;
3727 }
3728
3729 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3730 {
3731 /* Do we need to clean up the state of a thread that has
3732 completed a displaced single-step? (Doing so usually affects
3733 the PC, so do it here, before we set stop_pc.) */
3734 displaced_step_fixup (ecs->ptid,
3735 ecs->event_thread->suspend.stop_signal);
3736
3737 /* If we either finished a single-step or hit a breakpoint, but
3738 the user wanted this thread to be stopped, pretend we got a
3739 SIG0 (generic unsignaled stop). */
3740
3741 if (ecs->event_thread->stop_requested
3742 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3743 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3744 }
3745
3746 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3747
3748 if (debug_infrun)
3749 {
3750 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3751 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3752 struct cleanup *old_chain = save_inferior_ptid ();
3753
3754 inferior_ptid = ecs->ptid;
3755
3756 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3757 paddress (gdbarch, stop_pc));
3758 if (target_stopped_by_watchpoint ())
3759 {
3760 CORE_ADDR addr;
3761
3762 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3763
3764 if (target_stopped_data_address (&current_target, &addr))
3765 fprintf_unfiltered (gdb_stdlog,
3766 "infrun: stopped data address = %s\n",
3767 paddress (gdbarch, addr));
3768 else
3769 fprintf_unfiltered (gdb_stdlog,
3770 "infrun: (no data address available)\n");
3771 }
3772
3773 do_cleanups (old_chain);
3774 }
3775
3776 if (stepping_past_singlestep_breakpoint)
3777 {
3778 gdb_assert (singlestep_breakpoints_inserted_p);
3779 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3780 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3781
3782 stepping_past_singlestep_breakpoint = 0;
3783
3784 /* We've either finished single-stepping past the single-step
3785 breakpoint, or stopped for some other reason. It would be nice if
3786 we could tell, but we can't reliably. */
3787 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3788 {
3789 if (debug_infrun)
3790 fprintf_unfiltered (gdb_stdlog,
3791 "infrun: stepping_past_"
3792 "singlestep_breakpoint\n");
3793 /* Pull the single step breakpoints out of the target. */
3794 remove_single_step_breakpoints ();
3795 singlestep_breakpoints_inserted_p = 0;
3796
3797 ecs->random_signal = 0;
3798 ecs->event_thread->control.trap_expected = 0;
3799
3800 context_switch (saved_singlestep_ptid);
3801 if (deprecated_context_hook)
3802 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3803
3804 resume (1, TARGET_SIGNAL_0);
3805 prepare_to_wait (ecs);
3806 return;
3807 }
3808 }
3809
3810 if (!ptid_equal (deferred_step_ptid, null_ptid))
3811 {
3812 /* In non-stop mode, there's never a deferred_step_ptid set. */
3813 gdb_assert (!non_stop);
3814
3815 /* If we stopped for some other reason than single-stepping, ignore
3816 the fact that we were supposed to switch back. */
3817 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3818 {
3819 if (debug_infrun)
3820 fprintf_unfiltered (gdb_stdlog,
3821 "infrun: handling deferred step\n");
3822
3823 /* Pull the single step breakpoints out of the target. */
3824 if (singlestep_breakpoints_inserted_p)
3825 {
3826 remove_single_step_breakpoints ();
3827 singlestep_breakpoints_inserted_p = 0;
3828 }
3829
3830 ecs->event_thread->control.trap_expected = 0;
3831
3832 /* Note: We do not call context_switch at this point, as the
3833 context is already set up for stepping the original thread. */
3834 switch_to_thread (deferred_step_ptid);
3835 deferred_step_ptid = null_ptid;
3836 /* Suppress spurious "Switching to ..." message. */
3837 previous_inferior_ptid = inferior_ptid;
3838
3839 resume (1, TARGET_SIGNAL_0);
3840 prepare_to_wait (ecs);
3841 return;
3842 }
3843
3844 deferred_step_ptid = null_ptid;
3845 }
3846
3847 /* See if a thread hit a thread-specific breakpoint that was meant for
3848 another thread. If so, then step that thread past the breakpoint,
3849 and continue it. */
3850
3851 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3852 {
3853 int thread_hop_needed = 0;
3854 struct address_space *aspace =
3855 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3856
3857 /* Check if a regular breakpoint has been hit before checking
3858 for a potential single step breakpoint. Otherwise, GDB will
3859 not see this breakpoint hit when stepping onto breakpoints. */
3860 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3861 {
3862 ecs->random_signal = 0;
3863 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3864 thread_hop_needed = 1;
3865 }
3866 else if (singlestep_breakpoints_inserted_p)
3867 {
3868 /* We have not context switched yet, so this should be true
3869 no matter which thread hit the singlestep breakpoint. */
3870 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3871 if (debug_infrun)
3872 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3873 "trap for %s\n",
3874 target_pid_to_str (ecs->ptid));
3875
3876 ecs->random_signal = 0;
3877 /* The call to in_thread_list is necessary because PTIDs sometimes
3878 change when we go from single-threaded to multi-threaded. If
3879 the singlestep_ptid is still in the list, assume that it is
3880 really different from ecs->ptid. */
3881 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3882 && in_thread_list (singlestep_ptid))
3883 {
3884 /* If the PC of the thread we were trying to single-step
3885 has changed, discard this event (which we were going
3886 to ignore anyway), and pretend we saw that thread
3887 trap. This prevents us continuously moving the
3888 single-step breakpoint forward, one instruction at a
3889 time. If the PC has changed, then the thread we were
3890 trying to single-step has trapped or been signalled,
3891 but the event has not been reported to GDB yet.
3892
3893 There might be some cases where this loses signal
3894 information, if a signal has arrived at exactly the
3895 same time that the PC changed, but this is the best
3896 we can do with the information available. Perhaps we
3897 should arrange to report all events for all threads
3898 when they stop, or to re-poll the remote looking for
3899 this particular thread (i.e. temporarily enable
3900 schedlock). */
3901
3902 CORE_ADDR new_singlestep_pc
3903 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3904
3905 if (new_singlestep_pc != singlestep_pc)
3906 {
3907 enum target_signal stop_signal;
3908
3909 if (debug_infrun)
3910 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3911 " but expected thread advanced also\n");
3912
3913 /* The current context still belongs to
3914 singlestep_ptid. Don't swap here, since that's
3915 the context we want to use. Just fudge our
3916 state and continue. */
3917 stop_signal = ecs->event_thread->suspend.stop_signal;
3918 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3919 ecs->ptid = singlestep_ptid;
3920 ecs->event_thread = find_thread_ptid (ecs->ptid);
3921 ecs->event_thread->suspend.stop_signal = stop_signal;
3922 stop_pc = new_singlestep_pc;
3923 }
3924 else
3925 {
3926 if (debug_infrun)
3927 fprintf_unfiltered (gdb_stdlog,
3928 "infrun: unexpected thread\n");
3929
3930 thread_hop_needed = 1;
3931 stepping_past_singlestep_breakpoint = 1;
3932 saved_singlestep_ptid = singlestep_ptid;
3933 }
3934 }
3935 }
3936
3937 if (thread_hop_needed)
3938 {
3939 struct regcache *thread_regcache;
3940 int remove_status = 0;
3941
3942 if (debug_infrun)
3943 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3944
3945 /* Switch context before touching inferior memory, the
3946 previous thread may have exited. */
3947 if (!ptid_equal (inferior_ptid, ecs->ptid))
3948 context_switch (ecs->ptid);
3949
3950 /* Saw a breakpoint, but it was hit by the wrong thread.
3951 Just continue. */
3952
3953 if (singlestep_breakpoints_inserted_p)
3954 {
3955 /* Pull the single step breakpoints out of the target. */
3956 remove_single_step_breakpoints ();
3957 singlestep_breakpoints_inserted_p = 0;
3958 }
3959
3960 /* If the arch can displace step, don't remove the
3961 breakpoints. */
3962 thread_regcache = get_thread_regcache (ecs->ptid);
3963 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3964 remove_status = remove_breakpoints ();
3965
3966 /* Did we fail to remove breakpoints? If so, try
3967 to set the PC past the bp. (There's at least
3968 one situation in which we can fail to remove
3969 the bp's: On HP-UX's that use ttrace, we can't
3970 change the address space of a vforking child
3971 process until the child exits (well, okay, not
3972 then either :-) or execs.)  */
3973 if (remove_status != 0)
3974 error (_("Cannot step over breakpoint hit in wrong thread"));
3975 else
3976 { /* Single step */
3977 if (!non_stop)
3978 {
3979 /* Only need to require the next event from this
3980 thread in all-stop mode. */
3981 waiton_ptid = ecs->ptid;
3982 infwait_state = infwait_thread_hop_state;
3983 }
3984
3985 ecs->event_thread->stepping_over_breakpoint = 1;
3986 keep_going (ecs);
3987 return;
3988 }
3989 }
3990 else if (singlestep_breakpoints_inserted_p)
3991 {
3992 ecs->random_signal = 0;
3993 }
3994 }
3995 else
3996 ecs->random_signal = 1;
3997
3998 /* See if something interesting happened to the non-current thread. If
3999 so, then switch to that thread. */
4000 if (!ptid_equal (ecs->ptid, inferior_ptid))
4001 {
4002 if (debug_infrun)
4003 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
4004
4005 context_switch (ecs->ptid);
4006
4007 if (deprecated_context_hook)
4008 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4009 }
4010
4011 /* At this point, get hold of the now-current thread's frame. */
4012 frame = get_current_frame ();
4013 gdbarch = get_frame_arch (frame);
4014
4015 if (singlestep_breakpoints_inserted_p)
4016 {
4017 /* Pull the single step breakpoints out of the target. */
4018 remove_single_step_breakpoints ();
4019 singlestep_breakpoints_inserted_p = 0;
4020 }
4021
4022 if (stepped_after_stopped_by_watchpoint)
4023 stopped_by_watchpoint = 0;
4024 else
4025 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4026
4027 /* If necessary, step over this watchpoint. We'll be back to display
4028 it in a moment. */
4029 if (stopped_by_watchpoint
4030 && (target_have_steppable_watchpoint
4031 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4032 {
4033 /* At this point, we are stopped at an instruction which has
4034 attempted to write to a piece of memory under control of
4035 a watchpoint. The instruction hasn't actually executed
4036 yet. If we were to evaluate the watchpoint expression
4037 now, we would get the old value, and therefore no change
4038 would seem to have occurred.
4039
4040 In order to make watchpoints work `right', we really need
4041 to complete the memory write, and then evaluate the
4042 watchpoint expression. We do this by single-stepping the
4043 target.
4044
4045 It may not be necessary to disable the watchpoint to step over
4046 it. For example, the PA can (with some kernel cooperation)
4047 single step over a watchpoint without disabling the watchpoint.
4048
4049 It is far more common to need to disable a watchpoint to step
4050 the inferior over it. If we have non-steppable watchpoints,
4051 we must disable the current watchpoint; it's simplest to
4052 disable all watchpoints and breakpoints. */
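/* As a concrete illustration of the ordering problem: with a
   watchpoint on "x" and the inferior stopped on the store "x = 1;",
   evaluating the watchpoint expression now would still read the old
   value of "x" and report no change; only after the single-step
   below completes do the old and new values differ.  */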
4053 int hw_step = 1;
4054
4055 if (!target_have_steppable_watchpoint)
4056 {
4057 remove_breakpoints ();
4058 /* See the comment in resume for why we need to stop bypassing signals
4059 while breakpoints have been removed. */
4060 target_pass_signals (0, NULL);
4061 }
4062 /* Single step */
4063 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4064 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
4065 waiton_ptid = ecs->ptid;
4066 if (target_have_steppable_watchpoint)
4067 infwait_state = infwait_step_watch_state;
4068 else
4069 infwait_state = infwait_nonstep_watch_state;
4070 prepare_to_wait (ecs);
4071 return;
4072 }
4073
4074 clear_stop_func (ecs);
4075 ecs->event_thread->stepping_over_breakpoint = 0;
4076 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4077 ecs->event_thread->control.stop_step = 0;
4078 stop_print_frame = 1;
4079 ecs->random_signal = 0;
4080 stopped_by_random_signal = 0;
4081
4082 /* Hide inlined functions starting here, unless we just performed stepi or
4083 nexti. After stepi and nexti, always show the innermost frame (not any
4084 inline function call sites). */
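/* (A step_range_end of 1 is the sentinel used for stepi/nexti: a
   single-instruction step range.)  */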
4085 if (ecs->event_thread->control.step_range_end != 1)
4086 {
4087 struct address_space *aspace =
4088 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4089
4090 /* skip_inline_frames is expensive, so we avoid it if we can
4091 determine that the address is one where functions cannot have
4092 been inlined. This improves performance with inferiors that
4093 load a lot of shared libraries, because the solib event
4094 breakpoint is defined as the address of a function (i.e. not
4095 inline). Note that we have to check the previous PC as well
4096 as the current one to catch cases when we have just
4097 single-stepped off a breakpoint prior to reinstating it.
4098 Note that we're assuming that the code we single-step to is
4099 not inline, but that's not definitive: there's nothing
4100 preventing the event breakpoint function from containing
4101 inlined code, and the single-step ending up there. If the
4102 user had set a breakpoint on that inlined code, the missing
4103 skip_inline_frames call would break things. Fortunately
4104 that's an extremely unlikely scenario. */
4105 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4106 && !(ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4107 && ecs->event_thread->control.trap_expected
4108 && pc_at_non_inline_function (aspace,
4109 ecs->event_thread->prev_pc,
4110 &ecs->ws)))
4111 skip_inline_frames (ecs->ptid);
4112 }
4113
4114 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4115 && ecs->event_thread->control.trap_expected
4116 && gdbarch_single_step_through_delay_p (gdbarch)
4117 && currently_stepping (ecs->event_thread))
4118 {
4119 /* We're trying to step off a breakpoint. Turns out that we're
4120 also on an instruction that needs to be stepped multiple
4121 times before it has fully executed.  E.g., architectures
4122 with a delay slot. It needs to be stepped twice, once for
4123 the instruction and once for the delay slot. */
4124 int step_through_delay
4125 = gdbarch_single_step_through_delay (gdbarch, frame);
4126
4127 if (debug_infrun && step_through_delay)
4128 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4129 if (ecs->event_thread->control.step_range_end == 0
4130 && step_through_delay)
4131 {
4132 /* The user issued a continue when stopped at a breakpoint.
4133 Set up for another trap and get out of here. */
4134 ecs->event_thread->stepping_over_breakpoint = 1;
4135 keep_going (ecs);
4136 return;
4137 }
4138 else if (step_through_delay)
4139 {
4140 /* The user issued a step when stopped at a breakpoint.
4141 Maybe we should stop, maybe we should not - the delay
4142 slot *might* correspond to a line of source. In any
4143 case, don't decide that here, just set
4144 ecs->event_thread->stepping_over_breakpoint, making sure we
4145 single-step again before breakpoints are re-inserted. */
4146 ecs->event_thread->stepping_over_breakpoint = 1;
4147 }
4148 }
4149
4150 /* Look at the cause of the stop, and decide what to do.
4151 The alternatives are:
4152 1) stop_stepping and return; to really stop and return to the debugger,
4153 2) keep_going and return to start up again
4154 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
4155 3) set ecs->random_signal to 1, and the decision between 1 and 2
4156 will be made according to the signal handling tables. */
4157
4158 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4159 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
4160 || stop_soon == STOP_QUIETLY_REMOTE)
4161 {
4162 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4163 && stop_after_trap)
4164 {
4165 if (debug_infrun)
4166 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4167 stop_print_frame = 0;
4168 stop_stepping (ecs);
4169 return;
4170 }
4171
4172 /* This originates from start_remote(), start_inferior() and
4173 shared library hook functions.  */
4174 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4175 {
4176 if (debug_infrun)
4177 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4178 stop_stepping (ecs);
4179 return;
4180 }
4181
4182 /* This originates from attach_command(). We need to overwrite
4183 the stop_signal here, because some kernels don't ignore a
4184 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4185 See more comments in inferior.h. On the other hand, if we
4186 get a non-SIGSTOP, report it to the user - assume the backend
4187 will handle the SIGSTOP if it should show up later.
4188
4189 Also consider that the attach is complete when we see a
4190 SIGTRAP.  Some systems (e.g. Windows) and stubs supporting
4191 target extended-remote (e.g. gdbserver) report it instead of
4192 a SIGSTOP.  We already rely on SIGTRAP being our
4193 signal, so this is no exception.
4194
4195 Also consider that the attach is complete when we see a
4196 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4197 the target to stop all threads of the inferior, in case the
4198 low level attach operation doesn't stop them implicitly. If
4199 they weren't stopped implicitly, then the stub will report a
4200 TARGET_SIGNAL_0, meaning: stopped for no particular reason
4201 other than GDB's request. */
4202 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4203 && (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_STOP
4204 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4205 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_0))
4206 {
4207 stop_stepping (ecs);
4208 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4209 return;
4210 }
4211
4212 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4213 handles this event. */
4214 ecs->event_thread->control.stop_bpstat
4215 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4216 stop_pc, ecs->ptid, &ecs->ws);
4217
4218 /* The following is needed in case a breakpoint condition
4219 called a function.  */
4220 stop_print_frame = 1;
4221
4222 /* This is where we handle "moribund" watchpoints. Unlike
4223 software breakpoint traps, hardware watchpoint traps are
4224 always distinguishable from random traps. If no high-level
4225 watchpoint is associated with the reported stop data address
4226 anymore, then the bpstat does not explain the signal ---
4227 simply make sure to ignore it if `stopped_by_watchpoint' is
4228 set. */
4229
4230 if (debug_infrun
4231 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4232 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4233 && stopped_by_watchpoint)
4234 fprintf_unfiltered (gdb_stdlog,
4235 "infrun: no user watchpoint explains "
4236 "watchpoint SIGTRAP, ignoring\n");
4237
4238 /* NOTE: cagney/2003-03-29: These two checks for a random signal
4239 at one stage in the past included checks for an inferior
4240 function call's call dummy's return breakpoint. The original
4241 comment, that went with the test, read:
4242
4243 ``End of a stack dummy. Some systems (e.g. Sony news) give
4244 another signal besides SIGTRAP, so check here as well as
4245 above.''
4246
4247 If someone ever tries to get call dummies on a
4248 non-executable stack to work (where the target would stop
4249 with something like a SIGSEGV), then those tests might need
4250 to be re-instated. Given, however, that the tests were only
4251 enabled when momentary breakpoints were not being used, I
4252 suspect that it won't be the case.
4253
4254 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4255 be necessary for call dummies on a non-executable stack on
4256 SPARC. */
4257
4258 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
4259 ecs->random_signal
4260 = !(bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4261 || stopped_by_watchpoint
4262 || ecs->event_thread->control.trap_expected
4263 || (ecs->event_thread->control.step_range_end
4264 && (ecs->event_thread->control.step_resume_breakpoint
4265 == NULL)));
4266 else
4267 {
4268 ecs->random_signal = !bpstat_explains_signal
4269 (ecs->event_thread->control.stop_bpstat);
4270 if (!ecs->random_signal)
4271 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
4272 }
4273 }
4274
4275 /* When we reach this point, we've pretty much decided
4276 that the reason for stopping must've been a random
4277 (unexpected) signal. */
4278
4279 else
4280 ecs->random_signal = 1;
4281
4282 process_event_stop_test:
4283
4284 /* Re-fetch current thread's frame in case we did a
4285 "goto process_event_stop_test" above. */
4286 frame = get_current_frame ();
4287 gdbarch = get_frame_arch (frame);
4288
4289 /* For the program's own signals, act according to
4290 the signal handling tables. */
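/* (These tables are what the user adjusts with the "handle" command,
   e.g. "handle SIGUSR1 nostop noprint pass"; "info signals" shows
   their current state.)  */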
4291
4292 if (ecs->random_signal)
4293 {
4294 /* Signal not for debugging purposes. */
4295 int printed = 0;
4296 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4297
4298 if (debug_infrun)
4299 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
4300 ecs->event_thread->suspend.stop_signal);
4301
4302 stopped_by_random_signal = 1;
4303
4304 if (signal_print[ecs->event_thread->suspend.stop_signal])
4305 {
4306 printed = 1;
4307 target_terminal_ours_for_output ();
4308 print_signal_received_reason
4309 (ecs->event_thread->suspend.stop_signal);
4310 }
4311 /* Always stop on signals if we're either just gaining control
4312 of the program, or the user explicitly requested this thread
4313 to remain stopped. */
4314 if (stop_soon != NO_STOP_QUIETLY
4315 || ecs->event_thread->stop_requested
4316 || (!inf->detaching
4317 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4318 {
4319 stop_stepping (ecs);
4320 return;
4321 }
4322 /* If not going to stop, give terminal back
4323 if we took it away. */
4324 else if (printed)
4325 target_terminal_inferior ();
4326
4327 /* Clear the signal if it should not be passed. */
4328 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4329 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4330
4331 if (ecs->event_thread->prev_pc == stop_pc
4332 && ecs->event_thread->control.trap_expected
4333 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4334 {
4335 /* We were just starting a new sequence, attempting to
4336 single-step off of a breakpoint and expecting a SIGTRAP.
4337 Instead this signal arrives. This signal will take us out
4338 of the stepping range, so GDB needs to remember, once the
4339 signal handler returns, to resume stepping off that
4340 breakpoint. */
4341 /* To simplify things, "continue" is forced to use the same
4342 code paths as single-step - set a breakpoint at the
4343 signal return address and then, once hit, step off that
4344 breakpoint. */
4345 if (debug_infrun)
4346 fprintf_unfiltered (gdb_stdlog,
4347 "infrun: signal arrived while stepping over "
4348 "breakpoint\n");
4349
4350 insert_hp_step_resume_breakpoint_at_frame (frame);
4351 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4352 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4353 ecs->event_thread->control.trap_expected = 0;
4354 keep_going (ecs);
4355 return;
4356 }
4357
4358 if (ecs->event_thread->control.step_range_end != 0
4359 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_0
4360 && (ecs->event_thread->control.step_range_start <= stop_pc
4361 && stop_pc < ecs->event_thread->control.step_range_end)
4362 && frame_id_eq (get_stack_frame_id (frame),
4363 ecs->event_thread->control.step_stack_frame_id)
4364 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4365 {
4366 /* The inferior is about to take a signal that will take it
4367 out of the single step range. Set a breakpoint at the
4368 current PC (which is presumably where the signal handler
4369 will eventually return) and then allow the inferior to
4370 run free.
4371
4372 Note that this is only needed for a signal delivered
4373 while in the single-step range. Nested signals aren't a
4374 problem as they eventually all return. */
4375 if (debug_infrun)
4376 fprintf_unfiltered (gdb_stdlog,
4377 "infrun: signal may take us out of "
4378 "single-step range\n");
4379
4380 insert_hp_step_resume_breakpoint_at_frame (frame);
4381 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4382 ecs->event_thread->control.trap_expected = 0;
4383 keep_going (ecs);
4384 return;
4385 }
4386
4387 /* Note: step_resume_breakpoint may be non-NULL.  This occurs
4388 when either there's a nested signal, or when there's a
4389 pending signal enabled just as the signal handler returns
4390 (leaving the inferior at the step-resume-breakpoint without
4391 actually executing it). Either way continue until the
4392 breakpoint is really hit. */
4393 keep_going (ecs);
4394 return;
4395 }
4396
4397 /* Handle cases caused by hitting a breakpoint. */
4398 {
4399 CORE_ADDR jmp_buf_pc;
4400 struct bpstat_what what;
4401
4402 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4403
4404 if (what.call_dummy)
4405 {
4406 stop_stack_dummy = what.call_dummy;
4407 }
4408
4409 /* If we hit an internal event that triggers symbol changes, the
4410 current frame will be invalidated within bpstat_what (e.g., if
4411 we hit an internal solib event). Re-fetch it. */
4412 frame = get_current_frame ();
4413 gdbarch = get_frame_arch (frame);
4414
4415 switch (what.main_action)
4416 {
4417 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4418 /* If we hit the breakpoint at longjmp while stepping, we
4419 install a momentary breakpoint at the target of the
4420 jmp_buf. */
4421
4422 if (debug_infrun)
4423 fprintf_unfiltered (gdb_stdlog,
4424 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4425
4426 ecs->event_thread->stepping_over_breakpoint = 1;
4427
4428 if (what.is_longjmp)
4429 {
4430 if (!gdbarch_get_longjmp_target_p (gdbarch)
4431 || !gdbarch_get_longjmp_target (gdbarch,
4432 frame, &jmp_buf_pc))
4433 {
4434 if (debug_infrun)
4435 fprintf_unfiltered (gdb_stdlog,
4436 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4437 "(!gdbarch_get_longjmp_target)\n");
4438 keep_going (ecs);
4439 return;
4440 }
4441
4442 /* We're going to replace the current step-resume breakpoint
4443 with a longjmp-resume breakpoint. */
4444 delete_step_resume_breakpoint (ecs->event_thread);
4445
4446 /* Insert a breakpoint at resume address. */
4447 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4448 }
4449 else
4450 {
4451 struct symbol *func = get_frame_function (frame);
4452
4453 if (func)
4454 check_exception_resume (ecs, frame, func);
4455 }
4456 keep_going (ecs);
4457 return;
4458
4459 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4460 if (debug_infrun)
4461 fprintf_unfiltered (gdb_stdlog,
4462 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4463
4464 if (what.is_longjmp)
4465 {
4466 gdb_assert (ecs->event_thread->control.step_resume_breakpoint
4467 != NULL);
4468 delete_step_resume_breakpoint (ecs->event_thread);
4469 }
4470 else
4471 {
4472 /* There are several cases to consider.
4473
4474 1. The initiating frame no longer exists. In this case
4475 we must stop, because the exception has gone too far.
4476
4477 2. The initiating frame exists, and is the same as the
4478 current frame. We stop, because the exception has been
4479 caught.
4480
4481 3. The initiating frame exists and is different from
4482 the current frame. This means the exception has been
4483 caught beneath the initiating frame, so keep going. */
4484 struct frame_info *init_frame
4485 = frame_find_by_id (ecs->event_thread->initiating_frame);
4486
4487 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4488 != NULL);
4489 delete_exception_resume_breakpoint (ecs->event_thread);
4490
4491 if (init_frame)
4492 {
4493 struct frame_id current_id
4494 = get_frame_id (get_current_frame ());
4495 if (frame_id_eq (current_id,
4496 ecs->event_thread->initiating_frame))
4497 {
4498 /* Case 2. Fall through. */
4499 }
4500 else
4501 {
4502 /* Case 3. */
4503 keep_going (ecs);
4504 return;
4505 }
4506 }
4507
4508 /* For Cases 1 and 2, remove the step-resume breakpoint,
4509 if it exists. */
4510 delete_step_resume_breakpoint (ecs->event_thread);
4511 }
4512
4513 ecs->event_thread->control.stop_step = 1;
4514 print_end_stepping_range_reason ();
4515 stop_stepping (ecs);
4516 return;
4517
4518 case BPSTAT_WHAT_SINGLE:
4519 if (debug_infrun)
4520 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4521 ecs->event_thread->stepping_over_breakpoint = 1;
4522 /* Still need to check other stuff, at least the case
4523 where we are stepping and step out of the stepping range.  */
4524 break;
4525
4526 case BPSTAT_WHAT_STEP_RESUME:
4527 if (debug_infrun)
4528 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4529
4530 delete_step_resume_breakpoint (ecs->event_thread);
4531 if (ecs->event_thread->control.proceed_to_finish
4532 && execution_direction == EXEC_REVERSE)
4533 {
4534 struct thread_info *tp = ecs->event_thread;
4535
4536 /* We are finishing a function in reverse, and just hit
4537 the step-resume breakpoint at the start address of the
4538 function, and we're almost there -- just need to back
4539 up by one more single-step, which should take us back
4540 to the function call. */
4541 tp->control.step_range_start = tp->control.step_range_end = 1;
4542 keep_going (ecs);
4543 return;
4544 }
4545 fill_in_stop_func (gdbarch, ecs);
4546 if (stop_pc == ecs->stop_func_start
4547 && execution_direction == EXEC_REVERSE)
4548 {
4549 /* We are stepping over a function call in reverse, and
4550 just hit the step-resume breakpoint at the start
4551 address of the function. Go back to single-stepping,
4552 which should take us back to the function call. */
4553 ecs->event_thread->stepping_over_breakpoint = 1;
4554 keep_going (ecs);
4555 return;
4556 }
4557 break;
4558
4559 case BPSTAT_WHAT_STOP_NOISY:
4560 if (debug_infrun)
4561 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4562 stop_print_frame = 1;
4563
4564 /* We are about to nuke the step_resume_breakpoint via the
4565 cleanup chain, so no need to worry about it here. */
4566
4567 stop_stepping (ecs);
4568 return;
4569
4570 case BPSTAT_WHAT_STOP_SILENT:
4571 if (debug_infrun)
4572 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4573 stop_print_frame = 0;
4574
4575 /* We are about to nuke the step_resume_breakpoint via the
4576 cleanup chain, so no need to worry about it here. */
4577
4578 stop_stepping (ecs);
4579 return;
4580
4581 case BPSTAT_WHAT_HP_STEP_RESUME:
4582 if (debug_infrun)
4583 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4584
4585 delete_step_resume_breakpoint (ecs->event_thread);
4586 if (ecs->event_thread->step_after_step_resume_breakpoint)
4587 {
4588 /* Back when the step-resume breakpoint was inserted, we
4589 were trying to single-step off a breakpoint. Go back
4590 to doing that. */
4591 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4592 ecs->event_thread->stepping_over_breakpoint = 1;
4593 keep_going (ecs);
4594 return;
4595 }
4596 break;
4597
4598 case BPSTAT_WHAT_KEEP_CHECKING:
4599 break;
4600 }
4601 }
4602
4603 /* We come here if we hit a breakpoint but should not
4604 stop for it. Possibly we also were stepping
4605 and should stop for that. So fall through and
4606 test for stepping. But, if not stepping,
4607 do not stop. */
4608
4609 /* In all-stop mode, if we're currently stepping but have stopped in
4610 some other thread, we need to switch back to the stepped thread. */
4611 if (!non_stop)
4612 {
4613 struct thread_info *tp;
4614
4615 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4616 ecs->event_thread);
4617 if (tp)
4618 {
4619 /* However, if the current thread is blocked on some internal
4620 breakpoint, and we simply need to step over that breakpoint
4621 to get it going again, do that first. */
4622 if ((ecs->event_thread->control.trap_expected
4623 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
4624 || ecs->event_thread->stepping_over_breakpoint)
4625 {
4626 keep_going (ecs);
4627 return;
4628 }
4629
4630 /* If the stepping thread exited, then don't try to switch
4631 back and resume it, which could fail in several different
4632 ways depending on the target. Instead, just keep going.
4633
4634 We can find a stepping dead thread in the thread list in
4635 two cases:
4636
4637 - The target supports thread exit events, and when the
4638 target tries to delete the thread from the thread list,
4639 inferior_ptid pointed at the exiting thread. In such
4640 case, calling delete_thread does not really remove the
4641 thread from the list; instead, the thread is left listed,
4642 with 'exited' state.
4643
4644 - The target's debug interface does not support thread
4645 exit events, and so we have no idea whatsoever if the
4646 previously stepping thread is still alive. For that
4647 reason, we need to synchronously query the target
4648 now. */
4649 if (is_exited (tp->ptid)
4650 || !target_thread_alive (tp->ptid))
4651 {
4652 if (debug_infrun)
4653 fprintf_unfiltered (gdb_stdlog,
4654 "infrun: not switching back to "
4655 "stepped thread, it has vanished\n");
4656
4657 delete_thread (tp->ptid);
4658 keep_going (ecs);
4659 return;
4660 }
4661
4662 /* Otherwise, we no longer expect a trap in the current thread.
4663 Clear the trap_expected flag before switching back -- this is
4664 what keep_going would do as well, if we called it. */
4665 ecs->event_thread->control.trap_expected = 0;
4666
4667 if (debug_infrun)
4668 fprintf_unfiltered (gdb_stdlog,
4669 "infrun: switching back to stepped thread\n");
4670
4671 ecs->event_thread = tp;
4672 ecs->ptid = tp->ptid;
4673 context_switch (ecs->ptid);
4674 keep_going (ecs);
4675 return;
4676 }
4677 }
4678
4679 if (ecs->event_thread->control.step_resume_breakpoint)
4680 {
4681 if (debug_infrun)
4682 fprintf_unfiltered (gdb_stdlog,
4683 "infrun: step-resume breakpoint is inserted\n");
4684
4685 /* Having a step-resume breakpoint overrides anything
4686 else having to do with stepping commands until
4687 that breakpoint is reached. */
4688 keep_going (ecs);
4689 return;
4690 }
4691
4692 if (ecs->event_thread->control.step_range_end == 0)
4693 {
4694 if (debug_infrun)
4695 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4696 /* Likewise if we aren't even stepping. */
4697 keep_going (ecs);
4698 return;
4699 }
4700
4701 /* Re-fetch current thread's frame in case the code above caused
4702 the frame cache to be re-initialized, making our FRAME variable
4703 a dangling pointer. */
4704 frame = get_current_frame ();
4705 gdbarch = get_frame_arch (frame);
4706 fill_in_stop_func (gdbarch, ecs);
4707
4708 /* If stepping through a line, keep going if still within it.
4709
4710 Note that step_range_end is the address of the first instruction
4711 beyond the step range, and NOT the address of the last instruction
4712 within it!
4713
4714 Note also that during reverse execution, we may be stepping
4715 through a function epilogue and therefore must detect when
4716 the current frame changes in the middle of a line.  */
4717
4718 if (stop_pc >= ecs->event_thread->control.step_range_start
4719 && stop_pc < ecs->event_thread->control.step_range_end
4720 && (execution_direction != EXEC_REVERSE
4721 || frame_id_eq (get_frame_id (frame),
4722 ecs->event_thread->control.step_frame_id)))
4723 {
4724 if (debug_infrun)
4725 fprintf_unfiltered
4726 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4727 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4728 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4729
4730 /* When stepping backward, stop at beginning of line range
4731 (unless it's the function entry point, in which case
4732 keep going back to the call point). */
4733 if (stop_pc == ecs->event_thread->control.step_range_start
4734 && stop_pc != ecs->stop_func_start
4735 && execution_direction == EXEC_REVERSE)
4736 {
4737 ecs->event_thread->control.stop_step = 1;
4738 print_end_stepping_range_reason ();
4739 stop_stepping (ecs);
4740 }
4741 else
4742 keep_going (ecs);
4743
4744 return;
4745 }
4746
4747 /* We stepped out of the stepping range. */
4748
4749 /* If we are stepping at the source level and entered the runtime
4750 loader dynamic symbol resolution code...
4751
4752 EXEC_FORWARD: we keep on single stepping until we exit the run
4753 time loader code and reach the callee's address.
4754
4755 EXEC_REVERSE: we've already executed the callee (backward), and
4756 the runtime loader code is handled just like any other
4757 undebuggable function call. Now we need only keep stepping
4758 backward through the trampoline code, and that's handled further
4759 down, so there is nothing for us to do here. */
4760
4761 if (execution_direction != EXEC_REVERSE
4762 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4763 && in_solib_dynsym_resolve_code (stop_pc))
4764 {
4765 CORE_ADDR pc_after_resolver =
4766 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4767
4768 if (debug_infrun)
4769 fprintf_unfiltered (gdb_stdlog,
4770 "infrun: stepped into dynsym resolve code\n");
4771
4772 if (pc_after_resolver)
4773 {
4774 /* Set up a step-resume breakpoint at the address
4775 indicated by SKIP_SOLIB_RESOLVER. */
4776 struct symtab_and_line sr_sal;
4777
4778 init_sal (&sr_sal);
4779 sr_sal.pc = pc_after_resolver;
4780 sr_sal.pspace = get_frame_program_space (frame);
4781
4782 insert_step_resume_breakpoint_at_sal (gdbarch,
4783 sr_sal, null_frame_id);
4784 }
4785
4786 keep_going (ecs);
4787 return;
4788 }
4789
4790 if (ecs->event_thread->control.step_range_end != 1
4791 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4792 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4793 && get_frame_type (frame) == SIGTRAMP_FRAME)
4794 {
4795 if (debug_infrun)
4796 fprintf_unfiltered (gdb_stdlog,
4797 "infrun: stepped into signal trampoline\n");
4798 /* The inferior, while doing a "step" or "next", has ended up in
4799 a signal trampoline (either by a signal being delivered or by
4800 the signal handler returning). Just single-step until the
4801 inferior leaves the trampoline (either by calling the handler
4802 or returning). */
4803 keep_going (ecs);
4804 return;
4805 }
4806
4807 /* Check for subroutine calls. The check for the current frame
4808 equalling the step ID is not necessary - the check of the
4809 previous frame's ID is sufficient - but it is a common case and
4810 cheaper than checking the previous frame's ID.
4811
4812 NOTE: frame_id_eq will never report two invalid frame IDs as
4813 being equal, so to get into this block, both the current and
4814 previous frame must have valid frame IDs. */
4815 /* The outer_frame_id check is a heuristic to detect stepping
4816 through startup code. If we step over an instruction which
4817 sets the stack pointer from an invalid value to a valid value,
4818 we may detect that as a subroutine call from the mythical
4819 "outermost" function. This could be fixed by marking
4820 outermost frames as !stack_p,code_p,special_p. Then the
4821 initial outermost frame, before sp was valid, would
4822 have code_addr == &_start. See the comment in frame_id_eq
4823 for more. */
4824 if (!frame_id_eq (get_stack_frame_id (frame),
4825 ecs->event_thread->control.step_stack_frame_id)
4826 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4827 ecs->event_thread->control.step_stack_frame_id)
4828 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4829 outer_frame_id)
4830 || step_start_function != find_pc_function (stop_pc))))
4831 {
4832 CORE_ADDR real_stop_pc;
4833
4834 if (debug_infrun)
4835 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4836
4837 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4838 || ((ecs->event_thread->control.step_range_end == 1)
4839 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4840 ecs->stop_func_start)))
4841 {
4842 /* I presume that step_over_calls is only 0 when we're
4843 supposed to be stepping at the assembly language level
4844 ("stepi"). Just stop. */
4845 /* Also, maybe we just did a "nexti" inside a prologue, so we
4846 thought it was a subroutine call but it was not. Stop as
4847 well. FENN */
4848 /* And this works the same backward as frontward. MVS */
4849 ecs->event_thread->control.stop_step = 1;
4850 print_end_stepping_range_reason ();
4851 stop_stepping (ecs);
4852 return;
4853 }
4854
4855 /* Reverse stepping through solib trampolines. */
4856
4857 if (execution_direction == EXEC_REVERSE
4858 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4859 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4860 || (ecs->stop_func_start == 0
4861 && in_solib_dynsym_resolve_code (stop_pc))))
4862 {
4863 /* Any solib trampoline code can be handled in reverse
4864 by simply continuing to single-step. We have already
4865 executed the solib function (backwards), and a few
4866 steps will take us back through the trampoline to the
4867 caller. */
4868 keep_going (ecs);
4869 return;
4870 }
4871
4872 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4873 {
4874 /* We're doing a "next".
4875
4876 Normal (forward) execution: set a breakpoint at the
4877 callee's return address (the address at which the caller
4878 will resume).
4879
4880 Reverse (backward) execution: set the step-resume
4881 breakpoint at the start of the function that we just
4882 stepped into (backwards), and continue to there. When we
4883 get there, we'll need to single-step back to the caller. */
4884
4885 if (execution_direction == EXEC_REVERSE)
4886 {
4887 struct symtab_and_line sr_sal;
4888
4889 /* Normal function call return (static or dynamic). */
4890 init_sal (&sr_sal);
4891 sr_sal.pc = ecs->stop_func_start;
4892 sr_sal.pspace = get_frame_program_space (frame);
4893 insert_step_resume_breakpoint_at_sal (gdbarch,
4894 sr_sal, null_frame_id);
4895 }
4896 else
4897 insert_step_resume_breakpoint_at_caller (frame);
4898
4899 keep_going (ecs);
4900 return;
4901 }
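/* For illustration only (not part of the logic above): given

     void callee (void) { ... }
     void caller (void) { callee (); other (); }

   a forward "next" that lands inside callee plants the step-resume
   breakpoint at the instruction in caller just after the call, while
   a reverse "next" plants it at callee's entry point and, once it is
   hit, single-steps backwards across the call back into caller.  */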
4902
4903 /* If we are in a function call trampoline (a stub between the
4904 calling routine and the real function), locate the real
4905 function. That's what tells us (a) whether we want to step
4906 into it at all, and (b) what prologue we want to run to the
4907 end of, if we do step into it. */
4908 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4909 if (real_stop_pc == 0)
4910 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4911 if (real_stop_pc != 0)
4912 ecs->stop_func_start = real_stop_pc;
4913
4914 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4915 {
4916 struct symtab_and_line sr_sal;
4917
4918 init_sal (&sr_sal);
4919 sr_sal.pc = ecs->stop_func_start;
4920 sr_sal.pspace = get_frame_program_space (frame);
4921
4922 insert_step_resume_breakpoint_at_sal (gdbarch,
4923 sr_sal, null_frame_id);
4924 keep_going (ecs);
4925 return;
4926 }
4927
4928 /* If we have line number information for the function we are
4929 thinking of stepping into and the function isn't on the skip
4930 list, step into it.
4931
4932 If there are several symtabs at that PC (e.g. with include
4933 files), we just want to know whether *any* of them have line
4934 numbers. find_pc_line handles this. */
4935 {
4936 struct symtab_and_line tmp_sal;
4937
4938 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4939 if (tmp_sal.line != 0
4940 && !function_pc_is_marked_for_skip (ecs->stop_func_start))
4941 {
4942 if (execution_direction == EXEC_REVERSE)
4943 handle_step_into_function_backward (gdbarch, ecs);
4944 else
4945 handle_step_into_function (gdbarch, ecs);
4946 return;
4947 }
4948 }
4949
4950 /* If we have no line number and the step-stop-if-no-debug is
4951 set, we stop the step so that the user has a chance to switch
4952 to assembly mode. */
4953 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4954 && step_stop_if_no_debug)
4955 {
4956 ecs->event_thread->control.stop_step = 1;
4957 print_end_stepping_range_reason ();
4958 stop_stepping (ecs);
4959 return;
4960 }
4961
4962 if (execution_direction == EXEC_REVERSE)
4963 {
4964 /* Set a breakpoint at callee's start address.
4965 From there we can step once and be back in the caller. */
4966 struct symtab_and_line sr_sal;
4967
4968 init_sal (&sr_sal);
4969 sr_sal.pc = ecs->stop_func_start;
4970 sr_sal.pspace = get_frame_program_space (frame);
4971 insert_step_resume_breakpoint_at_sal (gdbarch,
4972 sr_sal, null_frame_id);
4973 }
4974 else
4975 /* Set a breakpoint at callee's return address (the address
4976 at which the caller will resume). */
4977 insert_step_resume_breakpoint_at_caller (frame);
4978
4979 keep_going (ecs);
4980 return;
4981 }
4982
4983 /* Reverse stepping through solib trampolines. */
4984
4985 if (execution_direction == EXEC_REVERSE
4986 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4987 {
4988 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4989 || (ecs->stop_func_start == 0
4990 && in_solib_dynsym_resolve_code (stop_pc)))
4991 {
4992 /* Any solib trampoline code can be handled in reverse
4993 by simply continuing to single-step. We have already
4994 executed the solib function (backwards), and a few
4995 steps will take us back through the trampoline to the
4996 caller. */
4997 keep_going (ecs);
4998 return;
4999 }
5000 else if (in_solib_dynsym_resolve_code (stop_pc))
5001 {
5002 /* Stepped backward into the solib dynsym resolver.
5003 Set a breakpoint at its start and continue, then
5004 one more step will take us out. */
5005 struct symtab_and_line sr_sal;
5006
5007 init_sal (&sr_sal);
5008 sr_sal.pc = ecs->stop_func_start;
5009 sr_sal.pspace = get_frame_program_space (frame);
5010 insert_step_resume_breakpoint_at_sal (gdbarch,
5011 sr_sal, null_frame_id);
5012 keep_going (ecs);
5013 return;
5014 }
5015 }
5016
5017 /* If we're in the return path from a shared library trampoline,
5018 we want to proceed through the trampoline when stepping. */
5019 if (gdbarch_in_solib_return_trampoline (gdbarch,
5020 stop_pc, ecs->stop_func_name)
5021 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5022 {
5023 /* Determine where this trampoline returns. */
5024 CORE_ADDR real_stop_pc;
5025
5026 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5027
5028 if (debug_infrun)
5029 fprintf_unfiltered (gdb_stdlog,
5030 "infrun: stepped into solib return tramp\n");
5031
5032 /* Only proceed through if we know where it's going. */
5033 if (real_stop_pc)
5034 {
5035 /* And put the step-breakpoint there and go until there. */
5036 struct symtab_and_line sr_sal;
5037
5038 init_sal (&sr_sal); /* initialize to zeroes */
5039 sr_sal.pc = real_stop_pc;
5040 sr_sal.section = find_pc_overlay (sr_sal.pc);
5041 sr_sal.pspace = get_frame_program_space (frame);
5042
5043 /* Do not specify what the fp should be when we stop since
5044 on some machines the prologue is where the new fp value
5045 is established. */
5046 insert_step_resume_breakpoint_at_sal (gdbarch,
5047 sr_sal, null_frame_id);
5048
5049 /* Restart without fiddling with the step ranges or
5050 other state. */
5051 keep_going (ecs);
5052 return;
5053 }
5054 }
5055
5056 stop_pc_sal = find_pc_line (stop_pc, 0);
5057
5058 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5059 the trampoline processing logic, however, there are some trampolines
5060 that have no names, so we should do trampoline handling first. */
5061 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5062 && ecs->stop_func_name == NULL
5063 && stop_pc_sal.line == 0)
5064 {
5065 if (debug_infrun)
5066 fprintf_unfiltered (gdb_stdlog,
5067 "infrun: stepped into undebuggable function\n");
5068
5069 /* The inferior just stepped into, or returned to, an
5070 undebuggable function (where there is no debugging information
5071 and no line number corresponding to the address where the
5072 inferior stopped). Since we want to skip this kind of code,
5073 we keep going until the inferior returns from this
5074 function - unless the user has asked us not to (via
5075 set step-mode) or we no longer know how to get back
5076 to the call site. */
5077 if (step_stop_if_no_debug
5078 || !frame_id_p (frame_unwind_caller_id (frame)))
5079 {
5080 /* If we have no line number and the step-stop-if-no-debug
5081 is set, we stop the step so that the user has a chance to
5082 switch to assembly mode. */
5083 ecs->event_thread->control.stop_step = 1;
5084 print_end_stepping_range_reason ();
5085 stop_stepping (ecs);
5086 return;
5087 }
5088 else
5089 {
5090 /* Set a breakpoint at callee's return address (the address
5091 at which the caller will resume). */
5092 insert_step_resume_breakpoint_at_caller (frame);
5093 keep_going (ecs);
5094 return;
5095 }
5096 }
5097
5098 if (ecs->event_thread->control.step_range_end == 1)
5099 {
5100 /* It is stepi or nexti. We always want to stop stepping after
5101 one instruction. */
5102 if (debug_infrun)
5103 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5104 ecs->event_thread->control.stop_step = 1;
5105 print_end_stepping_range_reason ();
5106 stop_stepping (ecs);
5107 return;
5108 }
5109
5110 if (stop_pc_sal.line == 0)
5111 {
5112 /* We have no line number information. That means to stop
5113 stepping (does this always happen right after one instruction,
5114 when we do "s" in a function with no line numbers,
5115 or can this happen as a result of a return or longjmp?). */
5116 if (debug_infrun)
5117 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5118 ecs->event_thread->control.stop_step = 1;
5119 print_end_stepping_range_reason ();
5120 stop_stepping (ecs);
5121 return;
5122 }
5123
5124 /* Look for "calls" to inlined functions, part one. If the inline
5125 frame machinery detected some skipped call sites, we have entered
5126 a new inline function. */
5127
5128 if (frame_id_eq (get_frame_id (get_current_frame ()),
5129 ecs->event_thread->control.step_frame_id)
5130 && inline_skipped_frames (ecs->ptid))
5131 {
5132 struct symtab_and_line call_sal;
5133
5134 if (debug_infrun)
5135 fprintf_unfiltered (gdb_stdlog,
5136 "infrun: stepped into inlined function\n");
5137
5138 find_frame_sal (get_current_frame (), &call_sal);
5139
5140 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5141 {
5142 /* For "step", we're going to stop. But if the call site
5143 for this inlined function is on the same source line as
5144 we were previously stepping, go down into the function
5145 first. Otherwise stop at the call site. */
5146
5147 if (call_sal.line == ecs->event_thread->current_line
5148 && call_sal.symtab == ecs->event_thread->current_symtab)
5149 step_into_inline_frame (ecs->ptid);
5150
5151 ecs->event_thread->control.stop_step = 1;
5152 print_end_stepping_range_reason ();
5153 stop_stepping (ecs);
5154 return;
5155 }
5156 else
5157 {
5158 /* For "next", we should stop at the call site if it is on a
5159 different source line. Otherwise continue through the
5160 inlined function. */
5161 if (call_sal.line == ecs->event_thread->current_line
5162 && call_sal.symtab == ecs->event_thread->current_symtab)
5163 keep_going (ecs);
5164 else
5165 {
5166 ecs->event_thread->control.stop_step = 1;
5167 print_end_stepping_range_reason ();
5168 stop_stepping (ecs);
5169 }
5170 return;
5171 }
5172 }
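/* Concrete example for the block above (illustration only): with

     static inline int sq (int x) { return x * x; }
     int g (int a) { return sq (a) + 1; }

   once the inline machinery reports skipped frames at the call to sq,
   a "step" issued from the call site's own line goes down into sq via
   step_into_inline_frame and stops there, while a "step" coming from a
   different line stops at the call site itself. A "next" from the call
   site's line keeps going through the inlined body; otherwise it too
   stops at the call site.  */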
5173
5174 /* Look for "calls" to inlined functions, part two. If we are still
5175 in the same real function we were stepping through, but we have
5176 to go further up to find the exact frame ID, we are stepping
5177 through a more inlined call beyond its call site. */
5178
5179 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5180 && !frame_id_eq (get_frame_id (get_current_frame ()),
5181 ecs->event_thread->control.step_frame_id)
5182 && stepped_in_from (get_current_frame (),
5183 ecs->event_thread->control.step_frame_id))
5184 {
5185 if (debug_infrun)
5186 fprintf_unfiltered (gdb_stdlog,
5187 "infrun: stepping through inlined function\n");
5188
5189 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5190 keep_going (ecs);
5191 else
5192 {
5193 ecs->event_thread->control.stop_step = 1;
5194 print_end_stepping_range_reason ();
5195 stop_stepping (ecs);
5196 }
5197 return;
5198 }
5199
5200 if ((stop_pc == stop_pc_sal.pc)
5201 && (ecs->event_thread->current_line != stop_pc_sal.line
5202 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5203 {
5204 /* We are at the start of a different line. So stop. Note that
5205 we don't stop if we step into the middle of a different line.
5206 That is said to make things like for (;;) statements work
5207 better. */
5208 if (debug_infrun)
5209 fprintf_unfiltered (gdb_stdlog,
5210 "infrun: stepped to a different line\n");
5211 ecs->event_thread->control.stop_step = 1;
5212 print_end_stepping_range_reason ();
5213 stop_stepping (ecs);
5214 return;
5215 }
5216
5217 /* We aren't done stepping.
5218
5219 Optimize by setting the stepping range to the line.
5220 (We might not be in the original line, but if we entered a
5221 new line in mid-statement, we continue stepping. This makes
5222 things like for(;;) statements work better.) */
5223
5224 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5225 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5226 set_step_info (frame, stop_pc_sal);
5227
5228 if (debug_infrun)
5229 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5230 keep_going (ecs);
5231 }
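/* Rough sketch of why the range update above pays off (illustration,
   not the literal code): earlier in the stepping logic in this file a
   stop that satisfies

     stop_pc >= tp->control.step_range_start
     && stop_pc < tp->control.step_range_end

   is simply resumed, so while single-stepping within one source line
   GDB normally does not have to consult the line tables again.  */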
5232
5233 /* Is thread TP in the middle of single-stepping? */
5234
5235 static int
5236 currently_stepping (struct thread_info *tp)
5237 {
5238 return ((tp->control.step_range_end
5239 && tp->control.step_resume_breakpoint == NULL)
5240 || tp->control.trap_expected
5241 || bpstat_should_step ());
5242 }
5243
5244 /* Returns true if any thread *but* the one passed in "data" is in the
5245 middle of stepping or of handling a "next". */
5246
5247 static int
5248 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5249 {
5250 if (tp == data)
5251 return 0;
5252
5253 return (tp->control.step_range_end
5254 || tp->control.trap_expected);
5255 }
5256
5257 /* Inferior has stepped into a subroutine call with source code that
5258 we should not step over. Step to the first line of code in
5259 it. */
5260
5261 static void
5262 handle_step_into_function (struct gdbarch *gdbarch,
5263 struct execution_control_state *ecs)
5264 {
5265 struct symtab *s;
5266 struct symtab_and_line stop_func_sal, sr_sal;
5267
5268 fill_in_stop_func (gdbarch, ecs);
5269
5270 s = find_pc_symtab (stop_pc);
5271 if (s && s->language != language_asm)
5272 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5273 ecs->stop_func_start);
5274
5275 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5276 /* Use the step_resume_break to step until the end of the prologue,
5277 even if that involves jumps (as it seems to on the vax under
5278 4.2). */
5279 /* If the prologue ends in the middle of a source line, continue to
5280 the end of that source line (if it is still within the function).
5281 Otherwise, just go to end of prologue. */
5282 if (stop_func_sal.end
5283 && stop_func_sal.pc != ecs->stop_func_start
5284 && stop_func_sal.end < ecs->stop_func_end)
5285 ecs->stop_func_start = stop_func_sal.end;
5286
5287 /* Architectures which require breakpoint adjustment might not be able
5288 to place a breakpoint at the computed address. If so, the test
5289 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5290 ecs->stop_func_start to an address at which a breakpoint may be
5291 legitimately placed.
5292
5293 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5294 made, GDB will enter an infinite loop when stepping through
5295 optimized code consisting of VLIW instructions which contain
5296 subinstructions corresponding to different source lines. On
5297 FR-V, it's not permitted to place a breakpoint on any but the
5298 first subinstruction of a VLIW instruction. When a breakpoint is
5299 set, GDB will adjust the breakpoint address to the beginning of
5300 the VLIW instruction. Thus, we need to make the corresponding
5301 adjustment here when computing the stop address. */
5302
5303 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5304 {
5305 ecs->stop_func_start
5306 = gdbarch_adjust_breakpoint_address (gdbarch,
5307 ecs->stop_func_start);
5308 }
5309
5310 if (ecs->stop_func_start == stop_pc)
5311 {
5312 /* We are already there: stop now. */
5313 ecs->event_thread->control.stop_step = 1;
5314 print_end_stepping_range_reason ();
5315 stop_stepping (ecs);
5316 return;
5317 }
5318 else
5319 {
5320 /* Put the step-breakpoint there and go until there. */
5321 init_sal (&sr_sal); /* initialize to zeroes */
5322 sr_sal.pc = ecs->stop_func_start;
5323 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5324 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5325
5326 /* Do not specify what the fp should be when we stop since on
5327 some machines the prologue is where the new fp value is
5328 established. */
5329 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5330
5331 /* And make sure stepping stops right away then. */
5332 ecs->event_thread->control.step_range_end
5333 = ecs->event_thread->control.step_range_start;
5334 }
5335 keep_going (ecs);
5336 }
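/* Illustration of the prologue handling above (an assumption about a
   typical target, not GDB code): for

     int f (int x) { int y = x + 1; return y; }

   gdbarch_skip_prologue advances stop_func_start past the frame setup
   instructions, so the step-resume breakpoint lands on the first
   instruction of "int y = x + 1;" and the eventual stop happens where
   f's arguments and locals are already addressable.  */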
5337
5338 /* Inferior has stepped backward into a subroutine call with source
5339 code that we should not step over. Step to the beginning of the
5340 last line of code in it. */
5341
5342 static void
5343 handle_step_into_function_backward (struct gdbarch *gdbarch,
5344 struct execution_control_state *ecs)
5345 {
5346 struct symtab *s;
5347 struct symtab_and_line stop_func_sal;
5348
5349 fill_in_stop_func (gdbarch, ecs);
5350
5351 s = find_pc_symtab (stop_pc);
5352 if (s && s->language != language_asm)
5353 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5354 ecs->stop_func_start);
5355
5356 stop_func_sal = find_pc_line (stop_pc, 0);
5357
5358 /* OK, we're just going to keep stepping here. */
5359 if (stop_func_sal.pc == stop_pc)
5360 {
5361 /* We're there already. Just stop stepping now. */
5362 ecs->event_thread->control.stop_step = 1;
5363 print_end_stepping_range_reason ();
5364 stop_stepping (ecs);
5365 }
5366 else
5367 {
5368 /* Else just reset the step range and keep going.
5369 No step-resume breakpoint, they don't work for
5370 epilogues, which can have multiple entry paths. */
5371 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5372 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5373 keep_going (ecs);
5374 }
5375 return;
5376 }
5377
5378 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5379 This is used both to skip functions and to skip over code. */
5380
5381 static void
5382 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5383 struct symtab_and_line sr_sal,
5384 struct frame_id sr_id,
5385 enum bptype sr_type)
5386 {
5387 /* There should never be more than one step-resume or longjmp-resume
5388 breakpoint per thread, so we should never be setting a new
5389 step_resume_breakpoint when one is already active. */
5390 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5391 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5392
5393 if (debug_infrun)
5394 fprintf_unfiltered (gdb_stdlog,
5395 "infrun: inserting step-resume breakpoint at %s\n",
5396 paddress (gdbarch, sr_sal.pc));
5397
5398 inferior_thread ()->control.step_resume_breakpoint
5399 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5400 }
5401
5402 void
5403 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5404 struct symtab_and_line sr_sal,
5405 struct frame_id sr_id)
5406 {
5407 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5408 sr_sal, sr_id,
5409 bp_step_resume);
5410 }
5411
5412 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5413 This is used to skip a potential signal handler.
5414
5415 This is called with the interrupted function's frame. The signal
5416 handler, when it returns, will resume the interrupted function at
5417 RETURN_FRAME.pc. */
5418
5419 static void
5420 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5421 {
5422 struct symtab_and_line sr_sal;
5423 struct gdbarch *gdbarch;
5424
5425 gdb_assert (return_frame != NULL);
5426 init_sal (&sr_sal); /* initialize to zeros */
5427
5428 gdbarch = get_frame_arch (return_frame);
5429 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5430 sr_sal.section = find_pc_overlay (sr_sal.pc);
5431 sr_sal.pspace = get_frame_program_space (return_frame);
5432
5433 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5434 get_stack_frame_id (return_frame),
5435 bp_hp_step_resume);
5436 }
5437
5438 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5439 is used to skip a function after stepping into it (for "next" or if
5440 the called function has no debugging information).
5441
5442 The current function has almost always been reached by single
5443 stepping a call or return instruction. NEXT_FRAME belongs to the
5444 current function, and the breakpoint will be set at the caller's
5445 resume address.
5446
5447 This is a separate function rather than reusing
5448 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5449 get_prev_frame, which may stop prematurely (see the implementation
5450 of frame_unwind_caller_id for an example). */
5451
5452 static void
5453 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5454 {
5455 struct symtab_and_line sr_sal;
5456 struct gdbarch *gdbarch;
5457
5458 /* We shouldn't have gotten here if we don't know where the call site
5459 is. */
5460 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5461
5462 init_sal (&sr_sal); /* initialize to zeros */
5463
5464 gdbarch = frame_unwind_caller_arch (next_frame);
5465 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5466 frame_unwind_caller_pc (next_frame));
5467 sr_sal.section = find_pc_overlay (sr_sal.pc);
5468 sr_sal.pspace = frame_unwind_program_space (next_frame);
5469
5470 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5471 frame_unwind_caller_id (next_frame));
5472 }
5473
5474 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5475 new breakpoint at the target of a jmp_buf. The handling of
5476 longjmp-resume uses the same mechanisms used for handling
5477 "step-resume" breakpoints. */
5478
5479 static void
5480 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5481 {
5482 /* There should never be more than one step-resume or longjmp-resume
5483 breakpoint per thread, so we should never be setting a new
5484 longjmp_resume_breakpoint when one is already active. */
5485 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5486
5487 if (debug_infrun)
5488 fprintf_unfiltered (gdb_stdlog,
5489 "infrun: inserting longjmp-resume breakpoint at %s\n",
5490 paddress (gdbarch, pc));
5491
5492 inferior_thread ()->control.step_resume_breakpoint =
5493 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5494 }
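/* Simplified sketch of how this is used (the actual caller is in the
   breakpoint-hit handling earlier in this file): when a step hits the
   internal breakpoint on longjmp, something along these lines runs:

     CORE_ADDR jmp_target;

     if (gdbarch_get_longjmp_target (gdbarch, frame, &jmp_target))
       insert_longjmp_resume_breakpoint (gdbarch, jmp_target);

   so the "step" appears to land at the setjmp caller instead of
   stopping somewhere inside longjmp.  */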
5495
5496 /* Insert an exception resume breakpoint. TP is the thread throwing
5497 the exception. The block B is the block of the unwinder debug hook
5498 function. FRAME is the frame corresponding to the call to this
5499 function. SYM is the symbol of the function argument holding the
5500 target PC of the exception. */
5501
5502 static void
5503 insert_exception_resume_breakpoint (struct thread_info *tp,
5504 struct block *b,
5505 struct frame_info *frame,
5506 struct symbol *sym)
5507 {
5508 volatile struct gdb_exception e;
5509
5510 /* We want to ignore errors here. */
5511 TRY_CATCH (e, RETURN_MASK_ERROR)
5512 {
5513 struct symbol *vsym;
5514 struct value *value;
5515 CORE_ADDR handler;
5516 struct breakpoint *bp;
5517
5518 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5519 value = read_var_value (vsym, frame);
5520 /* If the value was optimized out, revert to the old behavior. */
5521 if (! value_optimized_out (value))
5522 {
5523 handler = value_as_address (value);
5524
5525 if (debug_infrun)
5526 fprintf_unfiltered (gdb_stdlog,
5527 "infrun: exception resume at %lx\n",
5528 (unsigned long) handler);
5529
5530 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5531 handler, bp_exception_resume);
5532
5533 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
5534 frame = NULL;
5535
5536 bp->thread = tp->num;
5537 inferior_thread ()->control.exception_resume_breakpoint = bp;
5538 }
5539 }
5540 }
5541
5542 /* This is called when an exception has been intercepted. Check to
5543 see whether the exception's destination is of interest, and if so,
5544 set an exception resume breakpoint there. */
5545
5546 static void
5547 check_exception_resume (struct execution_control_state *ecs,
5548 struct frame_info *frame, struct symbol *func)
5549 {
5550 volatile struct gdb_exception e;
5551
5552 TRY_CATCH (e, RETURN_MASK_ERROR)
5553 {
5554 struct block *b;
5555 struct dict_iterator iter;
5556 struct symbol *sym;
5557 int argno = 0;
5558
5559 /* The exception breakpoint is a thread-specific breakpoint on
5560 the unwinder's debug hook, declared as:
5561
5562 void _Unwind_DebugHook (void *cfa, void *handler);
5563
5564 The CFA argument indicates the frame to which control is
5565 about to be transferred. HANDLER is the destination PC.
5566
5567 We ignore the CFA and set a temporary breakpoint at HANDLER.
5568 This is not extremely efficient but it avoids issues in gdb
5569 with computing the DWARF CFA, and it also works even in weird
5570 cases such as throwing an exception from inside a signal
5571 handler. */
5572
5573 b = SYMBOL_BLOCK_VALUE (func);
5574 ALL_BLOCK_SYMBOLS (b, iter, sym)
5575 {
5576 if (!SYMBOL_IS_ARGUMENT (sym))
5577 continue;
5578
5579 if (argno == 0)
5580 ++argno;
5581 else
5582 {
5583 insert_exception_resume_breakpoint (ecs->event_thread,
5584 b, frame, sym);
5585 break;
5586 }
5587 }
5588 }
5589 }
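/* Example (illustration only): for inferior code like

     try { may_throw (); } catch (int) { handle (); }

   the unwinder calls _Unwind_DebugHook with HANDLER set to the landing
   pad of the catch clause, so the momentary bp_exception_resume set
   above fires just as control is about to reach the handler. That is
   what lets a "next" over a throwing call stop in the catch block
   rather than letting the inferior run away.  */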
5590
5591 static void
5592 stop_stepping (struct execution_control_state *ecs)
5593 {
5594 if (debug_infrun)
5595 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5596
5597 /* Let callers know we don't want to wait for the inferior anymore. */
5598 ecs->wait_some_more = 0;
5599 }
5600
5601 /* This function handles various cases where we need to continue
5602 waiting for the inferior. */
5603 /* (Used to be the keep_going: label in the old wait_for_inferior). */
5604
5605 static void
5606 keep_going (struct execution_control_state *ecs)
5607 {
5608 /* Make sure normal_stop is called if we get a QUIT handled before
5609 reaching resume. */
5610 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5611
5612 /* Save the pc before execution, to compare with pc after stop. */
5613 ecs->event_thread->prev_pc
5614 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5615
5616 /* If we did not do break;, it means we should keep running the
5617 inferior and not return to the debugger. */
5618
5619 if (ecs->event_thread->control.trap_expected
5620 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
5621 {
5622 /* We took a signal (which we are supposed to pass through to
5623 the inferior, else we'd not get here) and we haven't yet
5624 gotten our trap. Simply continue. */
5625
5626 discard_cleanups (old_cleanups);
5627 resume (currently_stepping (ecs->event_thread),
5628 ecs->event_thread->suspend.stop_signal);
5629 }
5630 else
5631 {
5632 /* Either the trap was not expected, but we are continuing
5633 anyway (the user asked that this signal be passed to the
5634 child)
5635 -- or --
5636 The signal was SIGTRAP, e.g. it was our signal, but we
5637 decided we should resume from it.
5638
5639 We're going to run this baby now!
5640
5641 Note that insert_breakpoints won't try to re-insert
5642 already inserted breakpoints. Therefore, we don't
5643 care if breakpoints were already inserted, or not. */
5644
5645 if (ecs->event_thread->stepping_over_breakpoint)
5646 {
5647 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5648
5649 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5650 /* Since we can't do a displaced step, we have to remove
5651 the breakpoint while we step it. To keep things
5652 simple, we remove them all. */
5653 remove_breakpoints ();
5654 }
5655 else
5656 {
5657 volatile struct gdb_exception e;
5658
5659 /* Stop stepping when inserting breakpoints
5660 has failed. */
5661 TRY_CATCH (e, RETURN_MASK_ERROR)
5662 {
5663 insert_breakpoints ();
5664 }
5665 if (e.reason < 0)
5666 {
5667 exception_print (gdb_stderr, e);
5668 stop_stepping (ecs);
5669 return;
5670 }
5671 }
5672
5673 ecs->event_thread->control.trap_expected
5674 = ecs->event_thread->stepping_over_breakpoint;
5675
5676 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5677 specifies that such a signal should be delivered to the
5678 target program).
5679
5680 Typically, this would occur when a user is debugging a
5681 target monitor on a simulator: the target monitor sets a
5682 breakpoint; the simulator encounters this break-point and
5683 halts the simulation, handing control to GDB; GDB, noting
5684 that the break-point isn't valid, returns control back to the
5685 simulator; the simulator then delivers the hardware
5686 equivalent of a SIGNAL_TRAP to the program being debugged. */
5687
5688 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
5689 && !signal_program[ecs->event_thread->suspend.stop_signal])
5690 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
5691
5692 discard_cleanups (old_cleanups);
5693 resume (currently_stepping (ecs->event_thread),
5694 ecs->event_thread->suspend.stop_signal);
5695 }
5696
5697 prepare_to_wait (ecs);
5698 }
5699
5700 /* This function normally comes after a resume, before
5701 handle_inferior_event exits. It takes care of any last bits of
5702 housekeeping, and sets the all-important wait_some_more flag. */
5703
5704 static void
5705 prepare_to_wait (struct execution_control_state *ecs)
5706 {
5707 if (debug_infrun)
5708 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5709
5710 /* This is the old end of the while loop. Let everybody know we
5711 want to wait for the inferior some more and get called again
5712 soon. */
5713 ecs->wait_some_more = 1;
5714 }
5715
5716 /* Several print_*_reason functions to print why the inferior has stopped.
5717 We always print something when the inferior exits or receives a signal.
5718 The rest of the cases are dealt with later on in normal_stop and
5719 print_it_typical. Ideally there should be a call to one of these
5720 print_*_reason functions from handle_inferior_event each time
5721 stop_stepping is called. */
5722
5723 /* We are done with a step/next/si/ni command, print why the inferior
5724 has stopped. The CLI prints nothing here; in MI mode we emit the
5725 stop reason, but only when not in the middle of doing a "step n"
5726 operation for n > 1. */
5727
5728 static void
5729 print_end_stepping_range_reason (void)
5730 {
5731 if ((!inferior_thread ()->step_multi
5732 || !inferior_thread ()->control.stop_step)
5733 && ui_out_is_mi_like_p (current_uiout))
5734 ui_out_field_string (current_uiout, "reason",
5735 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5736 }
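/* In MI mode the field emitted above ends up in the *stopped record,
   e.g. (typical output, abbreviated):

     *stopped,reason="end-stepping-range",thread-id="1",frame={...}

   CLI users get no extra text for this case.  */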
5737
5738 /* The inferior was terminated by a signal, print why it stopped. */
5739
5740 static void
5741 print_signal_exited_reason (enum target_signal siggnal)
5742 {
5743 struct ui_out *uiout = current_uiout;
5744
5745 annotate_signalled ();
5746 if (ui_out_is_mi_like_p (uiout))
5747 ui_out_field_string
5748 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5749 ui_out_text (uiout, "\nProgram terminated with signal ");
5750 annotate_signal_name ();
5751 ui_out_field_string (uiout, "signal-name",
5752 target_signal_to_name (siggnal));
5753 annotate_signal_name_end ();
5754 ui_out_text (uiout, ", ");
5755 annotate_signal_string ();
5756 ui_out_field_string (uiout, "signal-meaning",
5757 target_signal_to_string (siggnal));
5758 annotate_signal_string_end ();
5759 ui_out_text (uiout, ".\n");
5760 ui_out_text (uiout, "The program no longer exists.\n");
5761 }
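/* Example of the CLI output produced above, for a SIGSEGV death:

     Program terminated with signal SIGSEGV, Segmentation fault.
     The program no longer exists.

   MI consumers additionally see reason="exited-signalled" together
   with the signal-name and signal-meaning fields.  */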
5762
5763 /* The inferior program is finished, print why it stopped. */
5764
5765 static void
5766 print_exited_reason (int exitstatus)
5767 {
5768 struct inferior *inf = current_inferior ();
5769 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5770 struct ui_out *uiout = current_uiout;
5771
5772 annotate_exited (exitstatus);
5773 if (exitstatus)
5774 {
5775 if (ui_out_is_mi_like_p (uiout))
5776 ui_out_field_string (uiout, "reason",
5777 async_reason_lookup (EXEC_ASYNC_EXITED));
5778 ui_out_text (uiout, "[Inferior ");
5779 ui_out_text (uiout, plongest (inf->num));
5780 ui_out_text (uiout, " (");
5781 ui_out_text (uiout, pidstr);
5782 ui_out_text (uiout, ") exited with code ");
5783 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5784 ui_out_text (uiout, "]\n");
5785 }
5786 else
5787 {
5788 if (ui_out_is_mi_like_p (uiout))
5789 ui_out_field_string
5790 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5791 ui_out_text (uiout, "[Inferior ");
5792 ui_out_text (uiout, plongest (inf->num));
5793 ui_out_text (uiout, " (");
5794 ui_out_text (uiout, pidstr);
5795 ui_out_text (uiout, ") exited normally]\n");
5796 }
5797 /* Support the --return-child-result option. */
5798 return_child_result_value = exitstatus;
5799 }
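/* Example of the CLI output produced above:

     [Inferior 1 (process 1234) exited with code 01]
     [Inferior 1 (process 1234) exited normally]

   Note that the exit code is formatted with "0%o", i.e. in octal, so
   an exit status of 10 is shown as "012".  */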
5800
5801 /* Signal received, print why the inferior has stopped. The signal table
5802 tells us to print about it. */
5803
5804 static void
5805 print_signal_received_reason (enum target_signal siggnal)
5806 {
5807 struct ui_out *uiout = current_uiout;
5808
5809 annotate_signal ();
5810
5811 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5812 {
5813 struct thread_info *t = inferior_thread ();
5814
5815 ui_out_text (uiout, "\n[");
5816 ui_out_field_string (uiout, "thread-name",
5817 target_pid_to_str (t->ptid));
5818 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5819 ui_out_text (uiout, " stopped");
5820 }
5821 else
5822 {
5823 ui_out_text (uiout, "\nProgram received signal ");
5824 annotate_signal_name ();
5825 if (ui_out_is_mi_like_p (uiout))
5826 ui_out_field_string
5827 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5828 ui_out_field_string (uiout, "signal-name",
5829 target_signal_to_name (siggnal));
5830 annotate_signal_name_end ();
5831 ui_out_text (uiout, ", ");
5832 annotate_signal_string ();
5833 ui_out_field_string (uiout, "signal-meaning",
5834 target_signal_to_string (siggnal));
5835 annotate_signal_string_end ();
5836 }
5837 ui_out_text (uiout, ".\n");
5838 }
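/* Examples of the CLI output produced above:

     Program received signal SIGINT, Interrupt.

   and, for the TARGET_SIGNAL_0 case (a stop with no signal to report),
   something like:

     [Thread 0x7ffff7fca700 (LWP 1234)] #2 stopped.

   where the thread name and number come from the selected thread.  */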
5839
5840 /* Reverse execution: target ran out of history info, print why the inferior
5841 has stopped. */
5842
5843 static void
5844 print_no_history_reason (void)
5845 {
5846 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
5847 }
5848
5849 /* Here to return control to GDB when the inferior stops for real.
5850 Print appropriate messages, remove breakpoints, give terminal our modes.
5851
5852 STOP_PRINT_FRAME nonzero means print the executing frame
5853 (pc, function, args, file, line number and line text).
5854 BREAKPOINTS_FAILED nonzero means stop was due to error
5855 attempting to insert breakpoints. */
5856
5857 void
5858 normal_stop (void)
5859 {
5860 struct target_waitstatus last;
5861 ptid_t last_ptid;
5862 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5863
5864 get_last_target_status (&last_ptid, &last);
5865
5866 /* If an exception is thrown from this point on, make sure to
5867 propagate GDB's knowledge of the executing state to the
5868 frontend/user running state. A QUIT is an easy exception to see
5869 here, so do this before any filtered output. */
5870 if (!non_stop)
5871 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5872 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5873 && last.kind != TARGET_WAITKIND_EXITED
5874 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5875 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5876
5877 /* In non-stop mode, we don't want GDB to switch threads behind the
5878 user's back, to avoid races where the user is typing a command to
5879 apply to thread x, but GDB switches to thread y before the user
5880 finishes entering the command. */
5881
5882 /* As with the notification of thread events, we want to delay
5883 notifying the user that we've switched thread context until
5884 the inferior actually stops.
5885
5886 There's no point in saying anything if the inferior has exited.
5887 Note that SIGNALLED here means "exited with a signal", not
5888 "received a signal". */
5889 if (!non_stop
5890 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5891 && target_has_execution
5892 && last.kind != TARGET_WAITKIND_SIGNALLED
5893 && last.kind != TARGET_WAITKIND_EXITED
5894 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5895 {
5896 target_terminal_ours_for_output ();
5897 printf_filtered (_("[Switching to %s]\n"),
5898 target_pid_to_str (inferior_ptid));
5899 annotate_thread_changed ();
5900 previous_inferior_ptid = inferior_ptid;
5901 }
5902
5903 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
5904 {
5905 gdb_assert (sync_execution || !target_can_async_p ());
5906
5907 target_terminal_ours_for_output ();
5908 printf_filtered (_("No unwaited-for children left.\n"));
5909 }
5910
5911 if (!breakpoints_always_inserted_mode () && target_has_execution)
5912 {
5913 if (remove_breakpoints ())
5914 {
5915 target_terminal_ours_for_output ();
5916 printf_filtered (_("Cannot remove breakpoints because "
5917 "program is no longer writable.\nFurther "
5918 "execution is probably impossible.\n"));
5919 }
5920 }
5921
5922 /* If an auto-display called a function and that got a signal,
5923 delete that auto-display to avoid an infinite recursion. */
5924
5925 if (stopped_by_random_signal)
5926 disable_current_display ();
5927
5928 /* Don't print a message if in the middle of doing a "step n"
5929 operation for n > 1. */
5930 if (target_has_execution
5931 && last.kind != TARGET_WAITKIND_SIGNALLED
5932 && last.kind != TARGET_WAITKIND_EXITED
5933 && inferior_thread ()->step_multi
5934 && inferior_thread ()->control.stop_step)
5935 goto done;
5936
5937 target_terminal_ours ();
5938 async_enable_stdin ();
5939
5940 /* Set the current source location. This will also happen if we
5941 display the frame below, but the current SAL will be incorrect
5942 during a user hook-stop function. */
5943 if (has_stack_frames () && !stop_stack_dummy)
5944 set_current_sal_from_frame (get_current_frame (), 1);
5945
5946 /* Let the user/frontend see the threads as stopped. */
5947 do_cleanups (old_chain);
5948
5949 /* Look up the hook_stop and run it (CLI internally handles problem
5950 of stop_command's pre-hook not existing). */
5951 if (stop_command)
5952 catch_errors (hook_stop_stub, stop_command,
5953 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5954
5955 if (!has_stack_frames ())
5956 goto done;
5957
5958 if (last.kind == TARGET_WAITKIND_SIGNALLED
5959 || last.kind == TARGET_WAITKIND_EXITED)
5960 goto done;
5961
5962 /* Select innermost stack frame - i.e., current frame is frame 0,
5963 and current location is based on that.
5964 Don't do this on return from a stack dummy routine,
5965 or if the program has exited. */
5966
5967 if (!stop_stack_dummy)
5968 {
5969 select_frame (get_current_frame ());
5970
5971 /* Print current location without a level number, if
5972 we have changed functions or hit a breakpoint.
5973 Print source line if we have one.
5974 bpstat_print() contains the logic deciding in detail
5975 what to print, based on the event(s) that just occurred. */
5976
5977 /* If --batch-silent is enabled then there's no need to print the current
5978 source location, and trying to do so risks an error message about
5979 missing source files. */
5980 if (stop_print_frame && !batch_silent)
5981 {
5982 int bpstat_ret;
5983 int source_flag;
5984 int do_frame_printing = 1;
5985 struct thread_info *tp = inferior_thread ();
5986
5987 bpstat_ret = bpstat_print (tp->control.stop_bpstat, last.kind);
5988 switch (bpstat_ret)
5989 {
5990 case PRINT_UNKNOWN:
5991 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5992 (or should) carry around the function and does (or
5993 should) use that when doing a frame comparison. */
5994 if (tp->control.stop_step
5995 && frame_id_eq (tp->control.step_frame_id,
5996 get_frame_id (get_current_frame ()))
5997 && step_start_function == find_pc_function (stop_pc))
5998 source_flag = SRC_LINE; /* Finished step, just
5999 print source line. */
6000 else
6001 source_flag = SRC_AND_LOC; /* Print location and
6002 source line. */
6003 break;
6004 case PRINT_SRC_AND_LOC:
6005 source_flag = SRC_AND_LOC; /* Print location and
6006 source line. */
6007 break;
6008 case PRINT_SRC_ONLY:
6009 source_flag = SRC_LINE;
6010 break;
6011 case PRINT_NOTHING:
6012 source_flag = SRC_LINE; /* something bogus */
6013 do_frame_printing = 0;
6014 break;
6015 default:
6016 internal_error (__FILE__, __LINE__, _("Unknown value."));
6017 }
6018
6019 /* The behavior of this routine with respect to the source
6020 flag is:
6021 SRC_LINE: Print only source line
6022 LOCATION: Print only location
6023 SRC_AND_LOC: Print location and source line. */
6024 if (do_frame_printing)
6025 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
6026
6027 /* Display the auto-display expressions. */
6028 do_displays ();
6029 }
6030 }
6031
6032 /* Save the function value return registers, if we care.
6033 We might be about to restore their previous contents. */
6034 if (inferior_thread ()->control.proceed_to_finish
6035 && execution_direction != EXEC_REVERSE)
6036 {
6037 /* This should not be necessary. */
6038 if (stop_registers)
6039 regcache_xfree (stop_registers);
6040
6041 /* NB: The copy goes through to the target picking up the value of
6042 all the registers. */
6043 stop_registers = regcache_dup (get_current_regcache ());
6044 }
6045
6046 if (stop_stack_dummy == STOP_STACK_DUMMY)
6047 {
6048 /* Pop the empty frame that contains the stack dummy.
6049 This also restores inferior state prior to the call
6050 (struct infcall_suspend_state). */
6051 struct frame_info *frame = get_current_frame ();
6052
6053 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6054 frame_pop (frame);
6055 /* frame_pop() calls reinit_frame_cache as the last thing it
6056 does which means there's currently no selected frame. We
6057 don't need to re-establish a selected frame if the dummy call
6058 returns normally, that will be done by
6059 restore_infcall_control_state. However, we do have to handle
6060 the case where the dummy call is returning after being
6061 stopped (e.g. the dummy call previously hit a breakpoint).
6062 We can't know which case we have so just always re-establish
6063 a selected frame here. */
6064 select_frame (get_current_frame ());
6065 }
6066
6067 done:
6068 annotate_stopped ();
6069
6070 /* Suppress the stop observer if we're in the middle of:
6071
6072 - a step n (n > 1), as there are still more steps to be done.
6073
6074 - a "finish" command, as the observer will be called in
6075 finish_command_continuation, so it can include the inferior
6076 function's return value.
6077
6078 - calling an inferior function, as we pretend the inferior didn't
6079 run at all. The return value of the call is handled by the
6080 expression evaluator, through call_function_by_hand. */
6081
6082 if (!target_has_execution
6083 || last.kind == TARGET_WAITKIND_SIGNALLED
6084 || last.kind == TARGET_WAITKIND_EXITED
6085 || last.kind == TARGET_WAITKIND_NO_RESUMED
6086 || (!(inferior_thread ()->step_multi
6087 && inferior_thread ()->control.stop_step)
6088 && !(inferior_thread ()->control.stop_bpstat
6089 && inferior_thread ()->control.proceed_to_finish)
6090 && !inferior_thread ()->control.in_infcall))
6091 {
6092 if (!ptid_equal (inferior_ptid, null_ptid))
6093 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6094 stop_print_frame);
6095 else
6096 observer_notify_normal_stop (NULL, stop_print_frame);
6097 }
6098
6099 if (target_has_execution)
6100 {
6101 if (last.kind != TARGET_WAITKIND_SIGNALLED
6102 && last.kind != TARGET_WAITKIND_EXITED)
6103 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6104 Delete any breakpoint that is to be deleted at the next stop. */
6105 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6106 }
6107
6108 /* Try to get rid of automatically added inferiors that are no
6109 longer needed. Keeping those around slows down things linearly.
6110 Note that this never removes the current inferior. */
6111 prune_inferiors ();
6112 }
6113
6114 static int
6115 hook_stop_stub (void *cmd)
6116 {
6117 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6118 return (0);
6119 }
6120 \f
6121 int
6122 signal_stop_state (int signo)
6123 {
6124 return signal_stop[signo];
6125 }
6126
6127 int
6128 signal_print_state (int signo)
6129 {
6130 return signal_print[signo];
6131 }
6132
6133 int
6134 signal_pass_state (int signo)
6135 {
6136 return signal_program[signo];
6137 }
6138
6139 static void
6140 signal_cache_update (int signo)
6141 {
6142 if (signo == -1)
6143 {
6144 for (signo = 0; signo < (int) TARGET_SIGNAL_LAST; signo++)
6145 signal_cache_update (signo);
6146
6147 return;
6148 }
6149
6150 signal_pass[signo] = (signal_stop[signo] == 0
6151 && signal_print[signo] == 0
6152 && signal_program[signo] == 1);
6153 }
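/* Illustration of the cache computed above: after the user runs

     (gdb) handle SIGUSR1 nostop noprint pass

   we have signal_stop[TARGET_SIGNAL_USR1] == 0,
   signal_print[TARGET_SIGNAL_USR1] == 0 and
   signal_program[TARGET_SIGNAL_USR1] == 1, so signal_pass for that
   signal becomes 1 and the target is allowed to deliver SIGUSR1
   straight to the inferior without reporting a stop to GDB at all.  */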
6154
6155 int
6156 signal_stop_update (int signo, int state)
6157 {
6158 int ret = signal_stop[signo];
6159
6160 signal_stop[signo] = state;
6161 signal_cache_update (signo);
6162 return ret;
6163 }
6164
6165 int
6166 signal_print_update (int signo, int state)
6167 {
6168 int ret = signal_print[signo];
6169
6170 signal_print[signo] = state;
6171 signal_cache_update (signo);
6172 return ret;
6173 }
6174
6175 int
6176 signal_pass_update (int signo, int state)
6177 {
6178 int ret = signal_program[signo];
6179
6180 signal_program[signo] = state;
6181 signal_cache_update (signo);
6182 return ret;
6183 }
6184
6185 static void
6186 sig_print_header (void)
6187 {
6188 printf_filtered (_("Signal Stop\tPrint\tPass "
6189 "to program\tDescription\n"));
6190 }
6191
6192 static void
6193 sig_print_info (enum target_signal oursig)
6194 {
6195 const char *name = target_signal_to_name (oursig);
6196 int name_padding = 13 - strlen (name);
6197
6198 if (name_padding <= 0)
6199 name_padding = 0;
6200
6201 printf_filtered ("%s", name);
6202 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6203 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6204 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6205 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6206 printf_filtered ("%s\n", target_signal_to_string (oursig));
6207 }
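/* Example of one line produced above, under the default settings for
   SIGINT (stop, print, nopass); column widths approximate:

     SIGINT        Yes     Yes     No              Interrupt

   lining up with the header printed by sig_print_header.  */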
6208
6209 /* Specify how various signals in the inferior should be handled. */
6210
6211 static void
6212 handle_command (char *args, int from_tty)
6213 {
6214 char **argv;
6215 int digits, wordlen;
6216 int sigfirst, signum, siglast;
6217 enum target_signal oursig;
6218 int allsigs;
6219 int nsigs;
6220 unsigned char *sigs;
6221 struct cleanup *old_chain;
6222
6223 if (args == NULL)
6224 {
6225 error_no_arg (_("signal to handle"));
6226 }
6227
6228 /* Allocate and zero an array of flags for which signals to handle. */
6229
6230 nsigs = (int) TARGET_SIGNAL_LAST;
6231 sigs = (unsigned char *) alloca (nsigs);
6232 memset (sigs, 0, nsigs);
6233
6234 /* Break the command line up into args. */
6235
6236 argv = gdb_buildargv (args);
6237 old_chain = make_cleanup_freeargv (argv);
6238
6239 /* Walk through the args, looking for signal oursigs, signal names, and
6240 actions. Signal numbers and signal names may be interspersed with
6241 actions, with the actions being performed for all signals cumulatively
6242 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
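/* For example, "handle SIGUSR1 SIGUSR2 nostop noprint pass" applies
   all three actions to both signals, and "handle 14-15 stop print"
   names a numeric range using GDB's own signal numbering (see
   target_signal_from_command below).  */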
6243
6244 while (*argv != NULL)
6245 {
6246 wordlen = strlen (*argv);
6247 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6248 {;
6249 }
6250 allsigs = 0;
6251 sigfirst = siglast = -1;
6252
6253 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6254 {
6255 /* Apply action to all signals except those used by the
6256 debugger. Silently skip those. */
6257 allsigs = 1;
6258 sigfirst = 0;
6259 siglast = nsigs - 1;
6260 }
6261 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6262 {
6263 SET_SIGS (nsigs, sigs, signal_stop);
6264 SET_SIGS (nsigs, sigs, signal_print);
6265 }
6266 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6267 {
6268 UNSET_SIGS (nsigs, sigs, signal_program);
6269 }
6270 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6271 {
6272 SET_SIGS (nsigs, sigs, signal_print);
6273 }
6274 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6275 {
6276 SET_SIGS (nsigs, sigs, signal_program);
6277 }
6278 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6279 {
6280 UNSET_SIGS (nsigs, sigs, signal_stop);
6281 }
6282 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6283 {
6284 SET_SIGS (nsigs, sigs, signal_program);
6285 }
6286 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6287 {
6288 UNSET_SIGS (nsigs, sigs, signal_print);
6289 UNSET_SIGS (nsigs, sigs, signal_stop);
6290 }
6291 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6292 {
6293 UNSET_SIGS (nsigs, sigs, signal_program);
6294 }
6295 else if (digits > 0)
6296 {
6297 /* It is numeric. The numeric signal refers to our own
6298 internal signal numbering from target.h, not to the host/target
6299 signal number. This is a feature; users really should be
6300 using symbolic names anyway, and the common ones like
6301 SIGHUP, SIGINT, SIGALRM, etc. will work right regardless. */
6302
6303 sigfirst = siglast = (int)
6304 target_signal_from_command (atoi (*argv));
6305 if ((*argv)[digits] == '-')
6306 {
6307 siglast = (int)
6308 target_signal_from_command (atoi ((*argv) + digits + 1));
6309 }
6310 if (sigfirst > siglast)
6311 {
6312 /* Bet he didn't figure we'd think of this case... */
6313 signum = sigfirst;
6314 sigfirst = siglast;
6315 siglast = signum;
6316 }
6317 }
6318 else
6319 {
6320 oursig = target_signal_from_name (*argv);
6321 if (oursig != TARGET_SIGNAL_UNKNOWN)
6322 {
6323 sigfirst = siglast = (int) oursig;
6324 }
6325 else
6326 {
6327 /* Not a number and not a recognized flag word => complain. */
6328 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6329 }
6330 }
6331
6332 /* If any signal numbers or symbol names were found, set flags for
6333 which signals to apply actions to. */
6334
6335 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6336 {
6337 switch ((enum target_signal) signum)
6338 {
6339 case TARGET_SIGNAL_TRAP:
6340 case TARGET_SIGNAL_INT:
6341 if (!allsigs && !sigs[signum])
6342 {
6343 if (query (_("%s is used by the debugger.\n\
6344 Are you sure you want to change it? "),
6345 target_signal_to_name ((enum target_signal) signum)))
6346 {
6347 sigs[signum] = 1;
6348 }
6349 else
6350 {
6351 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6352 gdb_flush (gdb_stdout);
6353 }
6354 }
6355 break;
6356 case TARGET_SIGNAL_0:
6357 case TARGET_SIGNAL_DEFAULT:
6358 case TARGET_SIGNAL_UNKNOWN:
6359 /* Make sure that "all" doesn't print these. */
6360 break;
6361 default:
6362 sigs[signum] = 1;
6363 break;
6364 }
6365 }
6366
6367 argv++;
6368 }
6369
6370 for (signum = 0; signum < nsigs; signum++)
6371 if (sigs[signum])
6372 {
6373 signal_cache_update (-1);
6374 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
6375 target_program_signals ((int) TARGET_SIGNAL_LAST, signal_program);
6376
6377 if (from_tty)
6378 {
6379 /* Show the results. */
6380 sig_print_header ();
6381 for (; signum < nsigs; signum++)
6382 if (sigs[signum])
6383 sig_print_info (signum);
6384 }
6385
6386 break;
6387 }
6388
6389 do_cleanups (old_chain);
6390 }
6391
6392 static void
6393 xdb_handle_command (char *args, int from_tty)
6394 {
6395 char **argv;
6396 struct cleanup *old_chain;
6397
6398 if (args == NULL)
6399 error_no_arg (_("xdb command"));
6400
6401 /* Break the command line up into args. */
6402
6403 argv = gdb_buildargv (args);
6404 old_chain = make_cleanup_freeargv (argv);
6405 if (argv[1] != (char *) NULL)
6406 {
6407 char *argBuf;
6408 int bufLen;
6409
6410 bufLen = strlen (argv[0]) + 20;
6411 argBuf = (char *) xmalloc (bufLen);
6412 if (argBuf)
6413 {
6414 int validFlag = 1;
6415 enum target_signal oursig;
6416
6417 oursig = target_signal_from_name (argv[0]);
6418 memset (argBuf, 0, bufLen);
6419 if (strcmp (argv[1], "Q") == 0)
6420 sprintf (argBuf, "%s %s", argv[0], "noprint");
6421 else
6422 {
6423 if (strcmp (argv[1], "s") == 0)
6424 {
6425 if (!signal_stop[oursig])
6426 sprintf (argBuf, "%s %s", argv[0], "stop");
6427 else
6428 sprintf (argBuf, "%s %s", argv[0], "nostop");
6429 }
6430 else if (strcmp (argv[1], "i") == 0)
6431 {
6432 if (!signal_program[oursig])
6433 sprintf (argBuf, "%s %s", argv[0], "pass");
6434 else
6435 sprintf (argBuf, "%s %s", argv[0], "nopass");
6436 }
6437 else if (strcmp (argv[1], "r") == 0)
6438 {
6439 if (!signal_print[oursig])
6440 sprintf (argBuf, "%s %s", argv[0], "print");
6441 else
6442 sprintf (argBuf, "%s %s", argv[0], "noprint");
6443 }
6444 else
6445 validFlag = 0;
6446 }
6447 if (validFlag)
6448 handle_command (argBuf, from_tty);
6449 else
6450 printf_filtered (_("Invalid signal handling flag.\n"));
6451 if (argBuf)
6452 xfree (argBuf);
6453 }
6454 }
6455 do_cleanups (old_chain);
6456 }
6457
6458 enum target_signal
6459 target_signal_from_command (int num)
6460 {
6461 if (num >= 1 && num <= 15)
6462 return (enum target_signal) num;
6463 error (_("Only signals 1-15 are valid as numeric signals.\n\
6464 Use \"info signals\" for a list of symbolic signals."));
6465 }
6466
6467 /* Print current contents of the tables set by the handle command.
6468 It is possible we should just be printing signals actually used
6469 by the current target (but for things to work right when switching
6470 targets, all signals should be in the signal tables). */
6471
6472 static void
6473 signals_info (char *signum_exp, int from_tty)
6474 {
6475 enum target_signal oursig;
6476
6477 sig_print_header ();
6478
6479 if (signum_exp)
6480 {
6481 /* First see if this is a symbol name. */
6482 oursig = target_signal_from_name (signum_exp);
6483 if (oursig == TARGET_SIGNAL_UNKNOWN)
6484 {
6485 /* No, try numeric. */
6486 oursig =
6487 target_signal_from_command (parse_and_eval_long (signum_exp));
6488 }
6489 sig_print_info (oursig);
6490 return;
6491 }
6492
6493 printf_filtered ("\n");
6494 /* These ugly casts brought to you by the native VAX compiler. */
6495 for (oursig = TARGET_SIGNAL_FIRST;
6496 (int) oursig < (int) TARGET_SIGNAL_LAST;
6497 oursig = (enum target_signal) ((int) oursig + 1))
6498 {
6499 QUIT;
6500
6501 if (oursig != TARGET_SIGNAL_UNKNOWN
6502 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
6503 sig_print_info (oursig);
6504 }
6505
6506 printf_filtered (_("\nUse the \"handle\" command "
6507 "to change these tables.\n"));
6508 }
6509
6510 /* Check if it makes sense to read $_siginfo from the current thread
6511 at this point. If not, throw an error. */
6512
6513 static void
6514 validate_siginfo_access (void)
6515 {
6516 /* No current inferior, no siginfo. */
6517 if (ptid_equal (inferior_ptid, null_ptid))
6518 error (_("No thread selected."));
6519
6520 /* Don't try to read from a dead thread. */
6521 if (is_exited (inferior_ptid))
6522 error (_("The current thread has terminated"));
6523
6524 /* ... or from a spinning thread. */
6525 if (is_running (inferior_ptid))
6526 error (_("Selected thread is running."));
6527 }
6528
6529 /* The $_siginfo convenience variable is a bit special. We don't know
6530 for sure the type of the value until we actually have a chance to
6531 fetch the data. The type can change depending on gdbarch, so it is
6532 also dependent on which thread you have selected. This is handled by:
6533
6534 1. making $_siginfo be an internalvar that creates a new value on
6535 access.
6536
6537 2. making the value of $_siginfo be an lval_computed value. */
6538
6539 /* This function implements the lval_computed support for reading a
6540 $_siginfo value. */
6541
6542 static void
6543 siginfo_value_read (struct value *v)
6544 {
6545 LONGEST transferred;
6546
6547 validate_siginfo_access ();
6548
6549 transferred =
6550 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6551 NULL,
6552 value_contents_all_raw (v),
6553 value_offset (v),
6554 TYPE_LENGTH (value_type (v)));
6555
6556 if (transferred != TYPE_LENGTH (value_type (v)))
6557 error (_("Unable to read siginfo"));
6558 }
6559
6560 /* This function implements the lval_computed support for writing a
6561 $_siginfo value. */
6562
6563 static void
6564 siginfo_value_write (struct value *v, struct value *fromval)
6565 {
6566 LONGEST transferred;
6567
6568 validate_siginfo_access ();
6569
6570 transferred = target_write (&current_target,
6571 TARGET_OBJECT_SIGNAL_INFO,
6572 NULL,
6573 value_contents_all_raw (fromval),
6574 value_offset (v),
6575 TYPE_LENGTH (value_type (fromval)));
6576
6577 if (transferred != TYPE_LENGTH (value_type (fromval)))
6578 error (_("Unable to write siginfo"));
6579 }
6580
6581 static const struct lval_funcs siginfo_value_funcs =
6582 {
6583 siginfo_value_read,
6584 siginfo_value_write
6585 };
6586
6587 /* Return a new value with the correct type for the siginfo object of
6588 the current thread using architecture GDBARCH. Return a void value
6589 if there's no object available. */
6590
6591 static struct value *
6592 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6593 {
6594 if (target_has_stack
6595 && !ptid_equal (inferior_ptid, null_ptid)
6596 && gdbarch_get_siginfo_type_p (gdbarch))
6597 {
6598 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6599
6600 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6601 }
6602
6603 return allocate_value (builtin_type (gdbarch)->builtin_void);
6604 }
6605
6606 \f
6607 /* infcall_suspend_state contains state about the program itself, such as
6608 its registers and any signal it received when it last stopped.
6609 This state must be restored regardless of how the inferior function call
6610 ends (either successfully, or after it hits a breakpoint or signal)
6611 if the program is to properly continue where it left off. */
6612
6613 struct infcall_suspend_state
6614 {
6615 struct thread_suspend_state thread_suspend;
6616 struct inferior_suspend_state inferior_suspend;
6617
6618 /* Other fields: */
6619 CORE_ADDR stop_pc;
6620 struct regcache *registers;
6621
6622 /* Format of SIGINFO_DATA or NULL if it is not present. */
6623 struct gdbarch *siginfo_gdbarch;
6624
6625 /* The saved siginfo data. Its format depends on SIGINFO_GDBARCH and its
6626 length is TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different
6627 gdbarch the content would be invalid. */
6628 gdb_byte *siginfo_data;
6629 };
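
/* Illustrative sketch (added for exposition; not in the original infrun.c):
   callers such as call_function_by_hand in infcall.c typically bracket an
   inferior function call roughly like this:

     struct infcall_suspend_state *sus = save_infcall_suspend_state ();
     struct cleanup *old = make_cleanup_restore_infcall_suspend_state (sus);
     ... set up the dummy frame and run the call ...
     do_cleanups (old);

   so registers, the stop PC and any pending signal are put back no matter
   how the call terminates.  */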
6630
6631 struct infcall_suspend_state *
6632 save_infcall_suspend_state (void)
6633 {
6634 struct infcall_suspend_state *inf_state;
6635 struct thread_info *tp = inferior_thread ();
6636 struct inferior *inf = current_inferior ();
6637 struct regcache *regcache = get_current_regcache ();
6638 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6639 gdb_byte *siginfo_data = NULL;
6640
6641 if (gdbarch_get_siginfo_type_p (gdbarch))
6642 {
6643 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6644 size_t len = TYPE_LENGTH (type);
6645 struct cleanup *back_to;
6646
6647 siginfo_data = xmalloc (len);
6648 back_to = make_cleanup (xfree, siginfo_data);
6649
6650 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6651 siginfo_data, 0, len) == len)
6652 discard_cleanups (back_to);
6653 else
6654 {
6655 /* Errors ignored. */
6656 do_cleanups (back_to);
6657 siginfo_data = NULL;
6658 }
6659 }
6660
6661 inf_state = XZALLOC (struct infcall_suspend_state);
6662
6663 if (siginfo_data)
6664 {
6665 inf_state->siginfo_gdbarch = gdbarch;
6666 inf_state->siginfo_data = siginfo_data;
6667 }
6668
6669 inf_state->thread_suspend = tp->suspend;
6670 inf_state->inferior_suspend = inf->suspend;
6671
6672 /* run_inferior_call will not use the signal anyway, since it calls
6673 `proceed' with TARGET_SIGNAL_0. */
6674 tp->suspend.stop_signal = TARGET_SIGNAL_0;
6675
6676 inf_state->stop_pc = stop_pc;
6677
6678 inf_state->registers = regcache_dup (regcache);
6679
6680 return inf_state;
6681 }
6682
6683 /* Restore inferior session state to INF_STATE. */
6684
6685 void
6686 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6687 {
6688 struct thread_info *tp = inferior_thread ();
6689 struct inferior *inf = current_inferior ();
6690 struct regcache *regcache = get_current_regcache ();
6691 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6692
6693 tp->suspend = inf_state->thread_suspend;
6694 inf->suspend = inf_state->inferior_suspend;
6695
6696 stop_pc = inf_state->stop_pc;
6697
6698 if (inf_state->siginfo_gdbarch == gdbarch)
6699 {
6700 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6701 size_t len = TYPE_LENGTH (type);
6702
6703 /* Errors ignored. */
6704 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6705 inf_state->siginfo_data, 0, len);
6706 }
6707
6708 /* The inferior can be gone if the user types "print exit(0)"
6709 (and perhaps other times). */
6710 if (target_has_execution)
6711 /* NB: The register write goes through to the target. */
6712 regcache_cpy (regcache, inf_state->registers);
6713
6714 discard_infcall_suspend_state (inf_state);
6715 }
6716
6717 static void
6718 do_restore_infcall_suspend_state_cleanup (void *state)
6719 {
6720 restore_infcall_suspend_state (state);
6721 }
6722
6723 struct cleanup *
6724 make_cleanup_restore_infcall_suspend_state
6725 (struct infcall_suspend_state *inf_state)
6726 {
6727 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6728 }
6729
6730 void
6731 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6732 {
6733 regcache_xfree (inf_state->registers);
6734 xfree (inf_state->siginfo_data);
6735 xfree (inf_state);
6736 }
6737
6738 struct regcache *
6739 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6740 {
6741 return inf_state->registers;
6742 }
6743
6744 /* infcall_control_state contains state regarding gdb's control of the
6745 inferior itself, such as stepping control. It also contains session state,
6746 such as the user's currently selected frame. */
6747
6748 struct infcall_control_state
6749 {
6750 struct thread_control_state thread_control;
6751 struct inferior_control_state inferior_control;
6752
6753 /* Other fields: */
6754 enum stop_stack_kind stop_stack_dummy;
6755 int stopped_by_random_signal;
6756 int stop_after_trap;
6757
6758 /* ID of the selected frame when the inferior function call was made. */
6759 struct frame_id selected_frame_id;
6760 };
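
/* Illustrative note (added for exposition; not in the original infrun.c):
   this is the companion of infcall_suspend_state above, and is likewise
   normally used through its cleanup wrapper:

     struct infcall_control_state *ctl = save_infcall_control_state ();
     make_cleanup_restore_infcall_control_state (ctl);

   so that stepping state, the stop bpstat and the user's selected frame are
   restored even if the inferior call throws an error.  */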
6761
6762 /* Save all of the information associated with the inferior<==>gdb
6763 connection. */
6764
6765 struct infcall_control_state *
6766 save_infcall_control_state (void)
6767 {
6768 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6769 struct thread_info *tp = inferior_thread ();
6770 struct inferior *inf = current_inferior ();
6771
6772 inf_status->thread_control = tp->control;
6773 inf_status->inferior_control = inf->control;
6774
6775 tp->control.step_resume_breakpoint = NULL;
6776 tp->control.exception_resume_breakpoint = NULL;
6777
6778 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6779 chain. If caller's caller is walking the chain, they'll be happier if we
6780 hand them back the original chain when restore_infcall_control_state is
6781 called. */
6782 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6783
6784 /* Other fields: */
6785 inf_status->stop_stack_dummy = stop_stack_dummy;
6786 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6787 inf_status->stop_after_trap = stop_after_trap;
6788
6789 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6790
6791 return inf_status;
6792 }
6793
6794 static int
6795 restore_selected_frame (void *args)
6796 {
6797 struct frame_id *fid = (struct frame_id *) args;
6798 struct frame_info *frame;
6799
6800 frame = frame_find_by_id (*fid);
6801
6802 /* If frame_find_by_id fails, the previously selected frame no longer
6803 exists in the current frame chain; warn and let the caller pick another. */
6804 if (frame == NULL)
6805 {
6806 warning (_("Unable to restore previously selected frame."));
6807 return 0;
6808 }
6809
6810 select_frame (frame);
6811
6812 return (1);
6813 }
6814
6815 /* Restore inferior session state to INF_STATUS. */
6816
6817 void
6818 restore_infcall_control_state (struct infcall_control_state *inf_status)
6819 {
6820 struct thread_info *tp = inferior_thread ();
6821 struct inferior *inf = current_inferior ();
6822
6823 if (tp->control.step_resume_breakpoint)
6824 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6825
6826 if (tp->control.exception_resume_breakpoint)
6827 tp->control.exception_resume_breakpoint->disposition
6828 = disp_del_at_next_stop;
6829
6830 /* Handle the bpstat_copy of the chain. */
6831 bpstat_clear (&tp->control.stop_bpstat);
6832
6833 tp->control = inf_status->thread_control;
6834 inf->control = inf_status->inferior_control;
6835
6836 /* Other fields: */
6837 stop_stack_dummy = inf_status->stop_stack_dummy;
6838 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6839 stop_after_trap = inf_status->stop_after_trap;
6840
6841 if (target_has_stack)
6842 {
6843 /* The point of catch_errors is that if the stack is clobbered,
6844 walking the stack might encounter a garbage pointer and
6845 error() trying to dereference it. */
6846 if (catch_errors
6847 (restore_selected_frame, &inf_status->selected_frame_id,
6848 "Unable to restore previously selected frame:\n",
6849 RETURN_MASK_ERROR) == 0)
6850 /* Error in restoring the selected frame. Select the innermost
6851 frame. */
6852 select_frame (get_current_frame ());
6853 }
6854
6855 xfree (inf_status);
6856 }
6857
6858 static void
6859 do_restore_infcall_control_state_cleanup (void *sts)
6860 {
6861 restore_infcall_control_state (sts);
6862 }
6863
6864 struct cleanup *
6865 make_cleanup_restore_infcall_control_state
6866 (struct infcall_control_state *inf_status)
6867 {
6868 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
6869 }
6870
6871 void
6872 discard_infcall_control_state (struct infcall_control_state *inf_status)
6873 {
6874 if (inf_status->thread_control.step_resume_breakpoint)
6875 inf_status->thread_control.step_resume_breakpoint->disposition
6876 = disp_del_at_next_stop;
6877
6878 if (inf_status->thread_control.exception_resume_breakpoint)
6879 inf_status->thread_control.exception_resume_breakpoint->disposition
6880 = disp_del_at_next_stop;
6881
6882 /* See save_infcall_control_state for info on stop_bpstat. */
6883 bpstat_clear (&inf_status->thread_control.stop_bpstat);
6884
6885 xfree (inf_status);
6886 }
6887 \f
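
/* Descriptive comment (added for exposition; not in the original infrun.c):
   ptid_match returns non-zero when PTID matches FILTER. FILTER may be the
   wild card minus_one_ptid (matches everything), a process-wide ptid such as
   pid_to_ptid (42) (matches every thread of that process), or a fully
   specified ptid (matches only an equal ptid). For example,
   ptid_match (ptid_build (42, 1001, 0), pid_to_ptid (42)) is non-zero.  */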
6888 int
6889 ptid_match (ptid_t ptid, ptid_t filter)
6890 {
6891 if (ptid_equal (filter, minus_one_ptid))
6892 return 1;
6893 if (ptid_is_pid (filter)
6894 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6895 return 1;
6896 else if (ptid_equal (ptid, filter))
6897 return 1;
6898
6899 return 0;
6900 }
6901
6902 /* restore_inferior_ptid() will be used by the cleanup machinery
6903 to restore the inferior_ptid value saved in a call to
6904 save_inferior_ptid(). */
6905
6906 static void
6907 restore_inferior_ptid (void *arg)
6908 {
6909 ptid_t *saved_ptid_ptr = arg;
6910
6911 inferior_ptid = *saved_ptid_ptr;
6912 xfree (arg);
6913 }
6914
6915 /* Save the value of inferior_ptid so that it may be restored by a
6916 later call to do_cleanups(). Returns the struct cleanup pointer
6917 needed to perform the cleanup later. */
6918
6919 struct cleanup *
6920 save_inferior_ptid (void)
6921 {
6922 ptid_t *saved_ptid_ptr;
6923
6924 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6925 *saved_ptid_ptr = inferior_ptid;
6926 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6927 }
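
/* Illustrative example (added for exposition; not in the original infrun.c;
   some_other_ptid is a hypothetical placeholder): the usual pattern is

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = some_other_ptid;
     ... operate on that thread ...
     do_cleanups (old_chain);

   which restores the original inferior_ptid even if an error is thrown in
   between, since errors run the pending cleanups.  */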
6928 \f
6929
6930 /* User interface for reverse debugging:
6931 Set exec-direction / show exec-direction commands
6932 (setting it returns an error unless the target supports reverse execution). */
6933
6934 int execution_direction = EXEC_FORWARD;
6935 static const char exec_forward[] = "forward";
6936 static const char exec_reverse[] = "reverse";
6937 static const char *exec_direction = exec_forward;
6938 static const char *const exec_direction_names[] = {
6939 exec_forward,
6940 exec_reverse,
6941 NULL
6942 };
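
/* Illustrative example (added for exposition; not in the original infrun.c):
   with a reverse-capable target active, for instance process record
   ("target record"), the commands

     (gdb) set exec-direction reverse
     (gdb) continue

   make execution commands run backwards through the recorded history, and
   "set exec-direction forward" restores the normal direction.  */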
6943
6944 static void
6945 set_exec_direction_func (char *args, int from_tty,
6946 struct cmd_list_element *cmd)
6947 {
6948 if (target_can_execute_reverse)
6949 {
6950 if (!strcmp (exec_direction, exec_forward))
6951 execution_direction = EXEC_FORWARD;
6952 else if (!strcmp (exec_direction, exec_reverse))
6953 execution_direction = EXEC_REVERSE;
6954 }
6955 else
6956 {
6957 exec_direction = exec_forward;
6958 error (_("Target does not support this operation."));
6959 }
6960 }
6961
6962 static void
6963 show_exec_direction_func (struct ui_file *out, int from_tty,
6964 struct cmd_list_element *cmd, const char *value)
6965 {
6966 switch (execution_direction) {
6967 case EXEC_FORWARD:
6968 fprintf_filtered (out, _("Forward.\n"));
6969 break;
6970 case EXEC_REVERSE:
6971 fprintf_filtered (out, _("Reverse.\n"));
6972 break;
6973 default:
6974 internal_error (__FILE__, __LINE__,
6975 _("bogus execution_direction value: %d"),
6976 (int) execution_direction);
6977 }
6978 }
6979
6980 /* User interface for non-stop mode. */
6981
6982 int non_stop = 0;
6983
6984 static void
6985 set_non_stop (char *args, int from_tty,
6986 struct cmd_list_element *c)
6987 {
6988 if (target_has_execution)
6989 {
6990 non_stop_1 = non_stop;
6991 error (_("Cannot change this setting while the inferior is running."));
6992 }
6993
6994 non_stop = non_stop_1;
6995 }
6996
6997 static void
6998 show_non_stop (struct ui_file *file, int from_tty,
6999 struct cmd_list_element *c, const char *value)
7000 {
7001 fprintf_filtered (file,
7002 _("Controlling the inferior in non-stop mode is %s.\n"),
7003 value);
7004 }
7005
7006 static void
7007 show_schedule_multiple (struct ui_file *file, int from_tty,
7008 struct cmd_list_element *c, const char *value)
7009 {
7010 fprintf_filtered (file, _("Resuming the execution of threads "
7011 "of all processes is %s.\n"), value);
7012 }
7013
7014 void
7015 _initialize_infrun (void)
7016 {
7017 int i;
7018 int numsigs;
7019
7020 add_info ("signals", signals_info, _("\
7021 What debugger does when program gets various signals.\n\
7022 Specify a signal as argument to print info on that signal only."));
7023 add_info_alias ("handle", "signals", 0);
7024
7025 add_com ("handle", class_run, handle_command, _("\
7026 Specify how to handle a signal.\n\
7027 Args are signals and actions to apply to those signals.\n\
7028 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7029 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7030 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7031 The special arg \"all\" is recognized to mean all signals except those\n\
7032 used by the debugger, typically SIGTRAP and SIGINT.\n\
7033 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7034 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7035 Stop means reenter debugger if this signal happens (implies print).\n\
7036 Print means print a message if this signal happens.\n\
7037 Pass means let program see this signal; otherwise program doesn't know.\n\
7038 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7039 Pass and Stop may be combined."));
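
  /* Illustrative examples of the syntax accepted by the "handle" command
     registered above (added for exposition; not in the original infrun.c):

       (gdb) handle SIGUSR1 nostop noprint pass
       (gdb) handle SIGSEGV stop print nopass
       (gdb) handle 14 print

     Symbolic names are preferred; bare numbers are honoured only for
     signals 1-15, as the help text says.  */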
7040 if (xdb_commands)
7041 {
7042 add_com ("lz", class_info, signals_info, _("\
7043 What debugger does when program gets various signals.\n\
7044 Specify a signal as argument to print info on that signal only."));
7045 add_com ("z", class_run, xdb_handle_command, _("\
7046 Specify how to handle a signal.\n\
7047 Args are signals and actions to apply to those signals.\n\
7048 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7049 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7050 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7051 The special arg \"all\" is recognized to mean all signals except those\n\
7052 used by the debugger, typically SIGTRAP and SIGINT.\n\
7053 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7054 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7055 nopass), \"Q\" (noprint)\n\
7056 Stop means reenter debugger if this signal happens (implies print).\n\
7057 Print means print a message if this signal happens.\n\
7058 Pass means let program see this signal; otherwise program doesn't know.\n\
7059 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7060 Pass and Stop may be combined."));
7061 }
7062
7063 if (!dbx_commands)
7064 stop_command = add_cmd ("stop", class_obscure,
7065 not_just_help_class_command, _("\
7066 There is no `stop' command, but you can set a hook on `stop'.\n\
7067 This allows you to set a list of commands to be run each time execution\n\
7068 of the program stops."), &cmdlist);
7069
7070 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7071 Set inferior debugging."), _("\
7072 Show inferior debugging."), _("\
7073 When non-zero, inferior specific debugging is enabled."),
7074 NULL,
7075 show_debug_infrun,
7076 &setdebuglist, &showdebuglist);
7077
7078 add_setshow_boolean_cmd ("displaced", class_maintenance,
7079 &debug_displaced, _("\
7080 Set displaced stepping debugging."), _("\
7081 Show displaced stepping debugging."), _("\
7082 When non-zero, displaced stepping specific debugging is enabled."),
7083 NULL,
7084 show_debug_displaced,
7085 &setdebuglist, &showdebuglist);
7086
7087 add_setshow_boolean_cmd ("non-stop", no_class,
7088 &non_stop_1, _("\
7089 Set whether gdb controls the inferior in non-stop mode."), _("\
7090 Show whether gdb controls the inferior in non-stop mode."), _("\
7091 When debugging a multi-threaded program and this setting is\n\
7092 off (the default, also called all-stop mode), when one thread stops\n\
7093 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7094 all other threads in the program while you interact with the thread of\n\
7095 interest. When you continue or step a thread, you can allow the other\n\
7096 threads to run, or have them remain stopped, but while you inspect any\n\
7097 thread's state, all threads stop.\n\
7098 \n\
7099 In non-stop mode, when one thread stops, other threads can continue\n\
7100 to run freely. You'll be able to step each thread independently,\n\
7101 leave it stopped or free to run as needed."),
7102 set_non_stop,
7103 show_non_stop,
7104 &setlist,
7105 &showlist);
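
  /* Illustrative note (added for exposition; not in the original infrun.c):
     set_non_stop above refuses to change the setting while there is a live
     inferior, so the mode is chosen before starting the program, e.g.

       (gdb) set non-stop on
       (gdb) run

     after which threads that hit breakpoints stop individually while the
     other threads keep running.  */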
7106
7107 numsigs = (int) TARGET_SIGNAL_LAST;
7108 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7109 signal_print = (unsigned char *)
7110 xmalloc (sizeof (signal_print[0]) * numsigs);
7111 signal_program = (unsigned char *)
7112 xmalloc (sizeof (signal_program[0]) * numsigs);
7113 signal_pass = (unsigned char *)
7114 xmalloc (sizeof (signal_pass[0]) * numsigs);
7115 for (i = 0; i < numsigs; i++)
7116 {
7117 signal_stop[i] = 1;
7118 signal_print[i] = 1;
7119 signal_program[i] = 1;
7120 }
7121
7122 /* Signals caused by debugger's own actions
7123 should not be given to the program afterwards. */
7124 signal_program[TARGET_SIGNAL_TRAP] = 0;
7125 signal_program[TARGET_SIGNAL_INT] = 0;
7126
7127 /* Signals that are not errors should not normally enter the debugger. */
7128 signal_stop[TARGET_SIGNAL_ALRM] = 0;
7129 signal_print[TARGET_SIGNAL_ALRM] = 0;
7130 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
7131 signal_print[TARGET_SIGNAL_VTALRM] = 0;
7132 signal_stop[TARGET_SIGNAL_PROF] = 0;
7133 signal_print[TARGET_SIGNAL_PROF] = 0;
7134 signal_stop[TARGET_SIGNAL_CHLD] = 0;
7135 signal_print[TARGET_SIGNAL_CHLD] = 0;
7136 signal_stop[TARGET_SIGNAL_IO] = 0;
7137 signal_print[TARGET_SIGNAL_IO] = 0;
7138 signal_stop[TARGET_SIGNAL_POLL] = 0;
7139 signal_print[TARGET_SIGNAL_POLL] = 0;
7140 signal_stop[TARGET_SIGNAL_URG] = 0;
7141 signal_print[TARGET_SIGNAL_URG] = 0;
7142 signal_stop[TARGET_SIGNAL_WINCH] = 0;
7143 signal_print[TARGET_SIGNAL_WINCH] = 0;
7144 signal_stop[TARGET_SIGNAL_PRIO] = 0;
7145 signal_print[TARGET_SIGNAL_PRIO] = 0;
7146
7147 /* These signals are used internally by user-level thread
7148 implementations. (See signal(5) on Solaris.) Like the above
7149 signals, a healthy program receives and handles them as part of
7150 its normal operation. */
7151 signal_stop[TARGET_SIGNAL_LWP] = 0;
7152 signal_print[TARGET_SIGNAL_LWP] = 0;
7153 signal_stop[TARGET_SIGNAL_WAITING] = 0;
7154 signal_print[TARGET_SIGNAL_WAITING] = 0;
7155 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
7156 signal_print[TARGET_SIGNAL_CANCEL] = 0;
7157
7158 /* Update cached state. */
7159 signal_cache_update (-1);
7160
7161 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7162 &stop_on_solib_events, _("\
7163 Set stopping for shared library events."), _("\
7164 Show stopping for shared library events."), _("\
7165 If nonzero, gdb will give control to the user when the dynamic linker\n\
7166 notifies gdb of shared library events. The most common event of interest\n\
7167 to the user would be loading/unloading of a new library."),
7168 NULL,
7169 show_stop_on_solib_events,
7170 &setlist, &showlist);
7171
7172 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7173 follow_fork_mode_kind_names,
7174 &follow_fork_mode_string, _("\
7175 Set debugger response to a program call of fork or vfork."), _("\
7176 Show debugger response to a program call of fork or vfork."), _("\
7177 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7178 parent - the original process is debugged after a fork\n\
7179 child - the new process is debugged after a fork\n\
7180 The unfollowed process will continue to run.\n\
7181 By default, the debugger will follow the parent process."),
7182 NULL,
7183 show_follow_fork_mode_string,
7184 &setlist, &showlist);
7185
7186 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7187 follow_exec_mode_names,
7188 &follow_exec_mode_string, _("\
7189 Set debugger response to a program call of exec."), _("\
7190 Show debugger response to a program call of exec."), _("\
7191 An exec call replaces the program image of a process.\n\
7192 \n\
7193 follow-exec-mode can be:\n\
7194 \n\
7195 new - the debugger creates a new inferior and rebinds the process\n\
7196 to this new inferior. The program the process was running before\n\
7197 the exec call can be restarted afterwards by restarting the original\n\
7198 inferior.\n\
7199 \n\
7200 same - the debugger keeps the process bound to the same inferior.\n\
7201 The new executable image replaces the previous executable loaded in\n\
7202 the inferior. Restarting the inferior after the exec call restarts\n\
7203 the executable the process was running after the exec call.\n\
7204 \n\
7205 By default, the debugger will use the same inferior."),
7206 NULL,
7207 show_follow_exec_mode_string,
7208 &setlist, &showlist);
7209
7210 add_setshow_enum_cmd ("scheduler-locking", class_run,
7211 scheduler_enums, &scheduler_mode, _("\
7212 Set mode for locking scheduler during execution."), _("\
7213 Show mode for locking scheduler during execution."), _("\
7214 off == no locking (threads may preempt at any time)\n\
7215 on == full locking (no thread except the current thread may run)\n\
7216 step == scheduler locked during every single-step operation.\n\
7217 In this mode, no other thread may run during a step command.\n\
7218 Other threads may run while stepping over a function call ('next')."),
7219 set_schedlock_func, /* traps on target vector */
7220 show_scheduler_mode,
7221 &setlist, &showlist);
7222
7223 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7224 Set mode for resuming threads of all processes."), _("\
7225 Show mode for resuming threads of all processes."), _("\
7226 When on, execution commands (such as 'continue' or 'next') resume all\n\
7227 threads of all processes. When off (which is the default), execution\n\
7228 commands only resume the threads of the current process. The set of\n\
7229 threads that are resumed is further refined by the scheduler-locking\n\
7230 mode (see help set scheduler-locking)."),
7231 NULL,
7232 show_schedule_multiple,
7233 &setlist, &showlist);
7234
7235 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7236 Set mode of the step operation."), _("\
7237 Show mode of the step operation."), _("\
7238 When set, doing a step over a function without debug line information\n\
7239 will stop at the first instruction of that function. Otherwise, the\n\
7240 function is skipped and the step command stops at a different source line."),
7241 NULL,
7242 show_step_stop_if_no_debug,
7243 &setlist, &showlist);
7244
7245 add_setshow_enum_cmd ("displaced-stepping", class_run,
7246 can_use_displaced_stepping_enum,
7247 &can_use_displaced_stepping, _("\
7248 Set debugger's willingness to use displaced stepping."), _("\
7249 Show debugger's willingness to use displaced stepping."), _("\
7250 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7251 supported by the target architecture. If off, gdb will not use displaced\n\
7252 stepping to step over breakpoints, even if such is supported by the target\n\
7253 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7254 if the target architecture supports it and non-stop mode is active, but will not\n\
7255 use it in all-stop mode (see help set non-stop)."),
7256 NULL,
7257 show_can_use_displaced_stepping,
7258 &setlist, &showlist);
7259
7260 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7261 &exec_direction, _("Set direction of execution.\n\
7262 Options are 'forward' or 'reverse'."),
7263 _("Show direction of execution (forward/reverse)."),
7264 _("Tells gdb whether to execute forward or backward."),
7265 set_exec_direction_func, show_exec_direction_func,
7266 &setlist, &showlist);
7267
7268 /* Set/show detach-on-fork: user-settable mode. */
7269
7270 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7271 Set whether gdb will detach the child of a fork."), _("\
7272 Show whether gdb will detach the child of a fork."), _("\
7273 Tells gdb whether to detach the child of a fork."),
7274 NULL, NULL, &setlist, &showlist);
7275
7276 /* Set/show disable address space randomization mode. */
7277
7278 add_setshow_boolean_cmd ("disable-randomization", class_support,
7279 &disable_randomization, _("\
7280 Set disabling of debuggee's virtual address space randomization."), _("\
7281 Show disabling of debuggee's virtual address space randomization."), _("\
7282 When this mode is on (which is the default), randomization of the virtual\n\
7283 address space is disabled. Standalone programs run with the randomization\n\
7284 enabled by default on some platforms."),
7285 &set_disable_randomization,
7286 &show_disable_randomization,
7287 &setlist, &showlist);
7288
7289 /* ptid initializations */
7290 inferior_ptid = null_ptid;
7291 target_last_wait_ptid = minus_one_ptid;
7292
7293 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7294 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7295 observer_attach_thread_exit (infrun_thread_thread_exit);
7296 observer_attach_inferior_exit (infrun_inferior_exit);
7297
7298 /* Explicitly create without lookup, since that tries to create a
7299 value with a void typed value, and when we get here, gdbarch
7300 isn't initialized yet. At this point, we're quite sure there
7301 isn't another convenience variable of the same name. */
7302 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
7303
7304 add_setshow_boolean_cmd ("observer", no_class,
7305 &observer_mode_1, _("\
7306 Set whether gdb controls the inferior in observer mode."), _("\
7307 Show whether gdb controls the inferior in observer mode."), _("\
7308 In observer mode, GDB can get data from the inferior, but not\n\
7309 affect its execution. Registers and memory may not be changed,\n\
7310 breakpoints may not be set, and the program cannot be interrupted\n\
7311 or signalled."),
7312 set_observer_mode,
7313 show_observer_mode,
7314 &setlist,
7315 &showlist);
7316 }