gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2012 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "gdb_string.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "exceptions.h"
28 #include "breakpoint.h"
29 #include "gdb_wait.h"
30 #include "gdbcore.h"
31 #include "gdbcmd.h"
32 #include "cli/cli-script.h"
33 #include "target.h"
34 #include "gdbthread.h"
35 #include "annotate.h"
36 #include "symfile.h"
37 #include "top.h"
38 #include <signal.h>
39 #include "inf-loop.h"
40 #include "regcache.h"
41 #include "value.h"
42 #include "observer.h"
43 #include "language.h"
44 #include "solib.h"
45 #include "main.h"
46 #include "dictionary.h"
47 #include "block.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55 #include "continuations.h"
56 #include "interps.h"
57 #include "skip.h"
58
59 /* Prototypes for local functions */
60
61 static void signals_info (char *, int);
62
63 static void handle_command (char *, int);
64
65 static void sig_print_info (enum target_signal);
66
67 static void sig_print_header (void);
68
69 static void resume_cleanups (void *);
70
71 static int hook_stop_stub (void *);
72
73 static int restore_selected_frame (void *);
74
75 static int follow_fork (void);
76
77 static void set_schedlock_func (char *args, int from_tty,
78 struct cmd_list_element *c);
79
80 static int currently_stepping (struct thread_info *tp);
81
82 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
83 void *data);
84
85 static void xdb_handle_command (char *args, int from_tty);
86
87 static int prepare_to_proceed (int);
88
89 static void print_exited_reason (int exitstatus);
90
91 static void print_signal_exited_reason (enum target_signal siggnal);
92
93 static void print_no_history_reason (void);
94
95 static void print_signal_received_reason (enum target_signal siggnal);
96
97 static void print_end_stepping_range_reason (void);
98
99 void _initialize_infrun (void);
100
101 void nullify_last_target_wait_ptid (void);
102
103 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
104
105 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
106
107 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
108
109 /* When set, stop the 'step' command if we enter a function which has
110 no line number information. The normal behavior is that we step
111 over such functions. */
112 int step_stop_if_no_debug = 0;
113 static void
114 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
115 struct cmd_list_element *c, const char *value)
116 {
117 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
118 }
119
120 /* In asynchronous mode, but simulating synchronous execution. */
121
122 int sync_execution = 0;
123
124 /* wait_for_inferior and normal_stop use this to notify the user
125 when the inferior stopped in a different thread than it had been
126 running in. */
127
128 static ptid_t previous_inferior_ptid;
129
130 /* Default behavior is to detach newly forked processes (legacy). */
131 int detach_fork = 1;
132
133 int debug_displaced = 0;
134 static void
135 show_debug_displaced (struct ui_file *file, int from_tty,
136 struct cmd_list_element *c, const char *value)
137 {
138 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
139 }
140
141 int debug_infrun = 0;
142 static void
143 show_debug_infrun (struct ui_file *file, int from_tty,
144 struct cmd_list_element *c, const char *value)
145 {
146 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
147 }
148
149
150 /* Support for disabling address space randomization. */
151
152 int disable_randomization = 1;
153
154 static void
155 show_disable_randomization (struct ui_file *file, int from_tty,
156 struct cmd_list_element *c, const char *value)
157 {
158 if (target_supports_disable_randomization ())
159 fprintf_filtered (file,
160 _("Disabling randomization of debuggee's "
161 "virtual address space is %s.\n"),
162 value);
163 else
164 fputs_filtered (_("Disabling randomization of debuggee's "
165 "virtual address space is unsupported on\n"
166 "this platform.\n"), file);
167 }
168
169 static void
170 set_disable_randomization (char *args, int from_tty,
171 struct cmd_list_element *c)
172 {
173 if (!target_supports_disable_randomization ())
174 error (_("Disabling randomization of debuggee's "
175 "virtual address space is unsupported on\n"
176 "this platform."));
177 }
178
179
180 /* If the program uses ELF-style shared libraries, then calls to
181 functions in shared libraries go through stubs, which live in a
182 table called the PLT (Procedure Linkage Table). The first time the
183 function is called, the stub sends control to the dynamic linker,
184 which looks up the function's real address, patches the stub so
185 that future calls will go directly to the function, and then passes
186 control to the function.
187
188 If we are stepping at the source level, we don't want to see any of
189 this --- we just want to skip over the stub and the dynamic linker.
190 The simple approach is to single-step until control leaves the
191 dynamic linker.
192
193 However, on some systems (e.g., Red Hat's 5.2 distribution) the
194 dynamic linker calls functions in the shared C library, so you
195 can't tell from the PC alone whether the dynamic linker is still
196 running. In this case, we use a step-resume breakpoint to get us
197 past the dynamic linker, as if we were using "next" to step over a
198 function call.
199
200 in_solib_dynsym_resolve_code() says whether we're in the dynamic
201 linker code or not. Normally, this means we single-step. However,
202 if SKIP_SOLIB_RESOLVER returns non-zero, its value is an
203 address where we can place a step-resume breakpoint to get past the
204 linker's symbol resolution function.
205
206 in_solib_dynsym_resolve_code() can generally be implemented in a
207 pretty portable way, by comparing the PC against the address ranges
208 of the dynamic linker's sections.
209
210 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
211 it depends on internal details of the dynamic linker. It's usually
212 not too hard to figure out where to put a breakpoint, but it
213 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
214 sanity checking. If it can't figure things out, returning zero and
215 getting the (possibly confusing) stepping behavior is better than
216 signalling an error, which will obscure the change in the
217 inferior's state. */
218
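/* Illustrative sketch (editorial addition, not part of infrun.c): a
   condensed example of how the two hooks described above could
   cooperate when a source-level step stops inside the dynamic
   linker.  The helper name sketch_step_past_solib_resolver is
   hypothetical; the calls it makes (in_solib_dynsym_resolve_code,
   gdbarch_skip_solib_resolver, init_sal,
   insert_step_resume_breakpoint_at_sal) are the GDB interfaces the
   comment refers to.  Program-space setup and the surrounding event
   handling are omitted for brevity.  */
#if 0
static void
sketch_step_past_solib_resolver (struct gdbarch *gdbarch, CORE_ADDR stop_pc)
{
  if (in_solib_dynsym_resolve_code (stop_pc))
    {
      CORE_ADDR pc_after_resolver
        = gdbarch_skip_solib_resolver (gdbarch, stop_pc);

      if (pc_after_resolver != 0)
        {
          /* Run to the address past the resolver, as if "next"ing
             over a function call.  */
          struct symtab_and_line sr_sal;

          init_sal (&sr_sal);
          sr_sal.pc = pc_after_resolver;
          insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
                                                null_frame_id);
        }
      /* Otherwise, fall back to single-stepping until control
         leaves the dynamic linker.  */
    }
}
#endif
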
219 /* This function returns TRUE if pc is the address of an instruction
220 that lies within the dynamic linker (such as the event hook, or the
221 dld itself).
222
223 This function must be used only when a dynamic linker event has
224 been caught, and the inferior is being stepped out of the hook, or
225 undefined results are guaranteed. */
226
227 #ifndef SOLIB_IN_DYNAMIC_LINKER
228 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
229 #endif
230
231 /* "Observer mode" is somewhat like a more extreme version of
232 non-stop, in which all GDB operations that might affect the
233 target's execution have been disabled. */
234
235 static int non_stop_1 = 0;
236
237 int observer_mode = 0;
238 static int observer_mode_1 = 0;
239
240 static void
241 set_observer_mode (char *args, int from_tty,
242 struct cmd_list_element *c)
243 {
244 extern int pagination_enabled;
245
246 if (target_has_execution)
247 {
248 observer_mode_1 = observer_mode;
249 error (_("Cannot change this setting while the inferior is running."));
250 }
251
252 observer_mode = observer_mode_1;
253
254 may_write_registers = !observer_mode;
255 may_write_memory = !observer_mode;
256 may_insert_breakpoints = !observer_mode;
257 may_insert_tracepoints = !observer_mode;
258 /* We can insert fast tracepoints in or out of observer mode,
259 but enable them if we're going into this mode. */
260 if (observer_mode)
261 may_insert_fast_tracepoints = 1;
262 may_stop = !observer_mode;
263 update_target_permissions ();
264
265 /* Going *into* observer mode we must force non-stop, then
266 going out we leave it that way. */
267 if (observer_mode)
268 {
269 target_async_permitted = 1;
270 pagination_enabled = 0;
271 non_stop = non_stop_1 = 1;
272 }
273
274 if (from_tty)
275 printf_filtered (_("Observer mode is now %s.\n"),
276 (observer_mode ? "on" : "off"));
277 }
278
279 static void
280 show_observer_mode (struct ui_file *file, int from_tty,
281 struct cmd_list_element *c, const char *value)
282 {
283 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
284 }
285
286 /* This updates the value of observer mode based on changes in
287 permissions. Note that we are deliberately ignoring the values of
288 may-write-registers and may-write-memory, since the user may have
289 reason to enable these during a session, for instance to turn on a
290 debugging-related global. */
291
292 void
293 update_observer_mode (void)
294 {
295 int newval;
296
297 newval = (!may_insert_breakpoints
298 && !may_insert_tracepoints
299 && may_insert_fast_tracepoints
300 && !may_stop
301 && non_stop);
302
303 /* Let the user know if things change. */
304 if (newval != observer_mode)
305 printf_filtered (_("Observer mode is now %s.\n"),
306 (newval ? "on" : "off"));
307
308 observer_mode = observer_mode_1 = newval;
309 }
310
311 /* Tables of how to react to signals; the user sets them. */
312
313 static unsigned char *signal_stop;
314 static unsigned char *signal_print;
315 static unsigned char *signal_program;
316
317 /* Table of signals that the target may silently handle.
318 This is automatically determined from the flags above,
319 and simply cached here. */
320 static unsigned char *signal_pass;
321
322 #define SET_SIGS(nsigs,sigs,flags) \
323 do { \
324 int signum = (nsigs); \
325 while (signum-- > 0) \
326 if ((sigs)[signum]) \
327 (flags)[signum] = 1; \
328 } while (0)
329
330 #define UNSET_SIGS(nsigs,sigs,flags) \
331 do { \
332 int signum = (nsigs); \
333 while (signum-- > 0) \
334 if ((sigs)[signum]) \
335 (flags)[signum] = 0; \
336 } while (0)
337
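/* Illustrative sketch (editorial addition, not part of infrun.c): how
   a caller such as the "handle" command might use the macros above.
   SIGS is a hypothetical scratch array with a nonzero entry for each
   signal the user named; the macros only touch the flagged entries
   and leave all other signals alone.  */
#if 0
{
  int nsigs = (int) TARGET_SIGNAL_LAST;
  unsigned char *sigs = xcalloc (nsigs, sizeof (*sigs));

  sigs[TARGET_SIGNAL_USR1] = 1;		/* e.g. "handle SIGUSR1 stop"  */

  /* "stop" implies "print" as well.  */
  SET_SIGS (nsigs, sigs, signal_stop);
  SET_SIGS (nsigs, sigs, signal_print);

  /* "nostop" would instead be: UNSET_SIGS (nsigs, sigs, signal_stop);  */

  xfree (sigs);
}
#endif
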
338 /* Value to pass to target_resume() to cause all threads to resume. */
339
340 #define RESUME_ALL minus_one_ptid
341
342 /* Command list pointer for the "stop" placeholder. */
343
344 static struct cmd_list_element *stop_command;
345
346 /* Function inferior was in as of last step command. */
347
348 static struct symbol *step_start_function;
349
350 /* Nonzero if we want to give control to the user when we're notified
351 of shared library events by the dynamic linker. */
352 int stop_on_solib_events;
353 static void
354 show_stop_on_solib_events (struct ui_file *file, int from_tty,
355 struct cmd_list_element *c, const char *value)
356 {
357 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
358 value);
359 }
360
361 /* Nonzero means we are expecting a trace trap
362 and should stop the inferior and return silently when it happens. */
363
364 int stop_after_trap;
365
366 /* Save register contents here when executing a "finish" command or when
367 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
368 Thus this contains the return value from the called function (assuming
369 values are returned in a register). */
370
371 struct regcache *stop_registers;
372
373 /* Nonzero after stop if current stack frame should be printed. */
374
375 static int stop_print_frame;
376
377 /* This is a cached copy of the pid/waitstatus of the last event
378 returned by target_wait()/deprecated_target_wait_hook(). This
379 information is returned by get_last_target_status(). */
380 static ptid_t target_last_wait_ptid;
381 static struct target_waitstatus target_last_waitstatus;
382
383 static void context_switch (ptid_t ptid);
384
385 void init_thread_stepping_state (struct thread_info *tss);
386
387 void init_infwait_state (void);
388
389 static const char follow_fork_mode_child[] = "child";
390 static const char follow_fork_mode_parent[] = "parent";
391
392 static const char *const follow_fork_mode_kind_names[] = {
393 follow_fork_mode_child,
394 follow_fork_mode_parent,
395 NULL
396 };
397
398 static const char *follow_fork_mode_string = follow_fork_mode_parent;
399 static void
400 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
401 struct cmd_list_element *c, const char *value)
402 {
403 fprintf_filtered (file,
404 _("Debugger response to a program "
405 "call of fork or vfork is \"%s\".\n"),
406 value);
407 }
408 \f
409
410 /* Tell the target to follow the fork we're stopped at. Returns true
411 if the inferior should be resumed; false, if the target for some
412 reason decided it's best not to resume. */
413
414 static int
415 follow_fork (void)
416 {
417 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
418 int should_resume = 1;
419 struct thread_info *tp;
420
421 /* Copy user stepping state to the new inferior thread. FIXME: the
422 followed fork child thread should have a copy of most of the
423 parent thread structure's run control related fields, not just these.
424 Initialized to avoid "may be used uninitialized" warnings from gcc. */
425 struct breakpoint *step_resume_breakpoint = NULL;
426 struct breakpoint *exception_resume_breakpoint = NULL;
427 CORE_ADDR step_range_start = 0;
428 CORE_ADDR step_range_end = 0;
429 struct frame_id step_frame_id = { 0 };
430
431 if (!non_stop)
432 {
433 ptid_t wait_ptid;
434 struct target_waitstatus wait_status;
435
436 /* Get the last target status returned by target_wait(). */
437 get_last_target_status (&wait_ptid, &wait_status);
438
439 /* If not stopped at a fork event, then there's nothing else to
440 do. */
441 if (wait_status.kind != TARGET_WAITKIND_FORKED
442 && wait_status.kind != TARGET_WAITKIND_VFORKED)
443 return 1;
444
445 /* Check if we switched over from WAIT_PTID, since the event was
446 reported. */
447 if (!ptid_equal (wait_ptid, minus_one_ptid)
448 && !ptid_equal (inferior_ptid, wait_ptid))
449 {
450 /* We did. Switch back to WAIT_PTID thread, to tell the
451 target to follow it (in either direction). We'll
452 afterwards refuse to resume, and inform the user what
453 happened. */
454 switch_to_thread (wait_ptid);
455 should_resume = 0;
456 }
457 }
458
459 tp = inferior_thread ();
460
461 /* If there were any forks/vforks that were caught and are now to be
462 followed, then do so now. */
463 switch (tp->pending_follow.kind)
464 {
465 case TARGET_WAITKIND_FORKED:
466 case TARGET_WAITKIND_VFORKED:
467 {
468 ptid_t parent, child;
469
470 /* If the user did a next/step, etc, over a fork call,
471 preserve the stepping state in the fork child. */
472 if (follow_child && should_resume)
473 {
474 step_resume_breakpoint = clone_momentary_breakpoint
475 (tp->control.step_resume_breakpoint);
476 step_range_start = tp->control.step_range_start;
477 step_range_end = tp->control.step_range_end;
478 step_frame_id = tp->control.step_frame_id;
479 exception_resume_breakpoint
480 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
481
482 /* For now, delete the parent's sr breakpoint, otherwise,
483 parent/child sr breakpoints are considered duplicates,
484 and the child version will not be installed. Remove
485 this when the breakpoints module becomes aware of
486 inferiors and address spaces. */
487 delete_step_resume_breakpoint (tp);
488 tp->control.step_range_start = 0;
489 tp->control.step_range_end = 0;
490 tp->control.step_frame_id = null_frame_id;
491 delete_exception_resume_breakpoint (tp);
492 }
493
494 parent = inferior_ptid;
495 child = tp->pending_follow.value.related_pid;
496
497 /* Tell the target to do whatever is necessary to follow
498 either parent or child. */
499 if (target_follow_fork (follow_child))
500 {
501 /* Target refused to follow, or there's some other reason
502 we shouldn't resume. */
503 should_resume = 0;
504 }
505 else
506 {
507 /* This pending follow fork event is now handled, one way
508 or another. The previously selected thread may be gone
509 from the lists by now, but if it is still around, we need
510 to clear the pending follow request. */
511 tp = find_thread_ptid (parent);
512 if (tp)
513 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
514
515 /* This makes sure we don't try to apply the "Switched
516 over from WAIT_PID" logic above. */
517 nullify_last_target_wait_ptid ();
518
519 /* If we followed the child, switch to it... */
520 if (follow_child)
521 {
522 switch_to_thread (child);
523
524 /* ... and preserve the stepping state, in case the
525 user was stepping over the fork call. */
526 if (should_resume)
527 {
528 tp = inferior_thread ();
529 tp->control.step_resume_breakpoint
530 = step_resume_breakpoint;
531 tp->control.step_range_start = step_range_start;
532 tp->control.step_range_end = step_range_end;
533 tp->control.step_frame_id = step_frame_id;
534 tp->control.exception_resume_breakpoint
535 = exception_resume_breakpoint;
536 }
537 else
538 {
539 /* If we get here, it was because we're trying to
540 resume from a fork catchpoint, but, the user
541 has switched threads away from the thread that
542 forked. In that case, the resume command
543 issued is most likely not applicable to the
544 child, so just warn, and refuse to resume. */
545 warning (_("Not resuming: switched threads "
546 "before following fork child.\n"));
547 }
548
549 /* Reset breakpoints in the child as appropriate. */
550 follow_inferior_reset_breakpoints ();
551 }
552 else
553 switch_to_thread (parent);
554 }
555 }
556 break;
557 case TARGET_WAITKIND_SPURIOUS:
558 /* Nothing to follow. */
559 break;
560 default:
561 internal_error (__FILE__, __LINE__,
562 "Unexpected pending_follow.kind %d\n",
563 tp->pending_follow.kind);
564 break;
565 }
566
567 return should_resume;
568 }
569
570 void
571 follow_inferior_reset_breakpoints (void)
572 {
573 struct thread_info *tp = inferior_thread ();
574
575 /* Was there a step_resume breakpoint? (There was if the user
576 did a "next" at the fork() call.) If so, explicitly reset its
577 thread number.
578
579 step_resumes are a form of bp that are made to be per-thread.
580 Since we created the step_resume bp when the parent process
581 was being debugged, and now are switching to the child process,
582 from the breakpoint package's viewpoint, that's a switch of
583 "threads". We must update the bp's notion of which thread
584 it is for, or it'll be ignored when it triggers. */
585
586 if (tp->control.step_resume_breakpoint)
587 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
588
589 if (tp->control.exception_resume_breakpoint)
590 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
591
592 /* Reinsert all breakpoints in the child. The user may have set
593 breakpoints after catching the fork, in which case those
594 were never set in the child, but only in the parent. This makes
595 sure the inserted breakpoints match the breakpoint list. */
596
597 breakpoint_re_set ();
598 insert_breakpoints ();
599 }
600
601 /* The vfork child has exited or execed: resume those threads of the
602 parent that the user wanted to be executing. */
603
604 static int
605 proceed_after_vfork_done (struct thread_info *thread,
606 void *arg)
607 {
608 int pid = * (int *) arg;
609
610 if (ptid_get_pid (thread->ptid) == pid
611 && is_running (thread->ptid)
612 && !is_executing (thread->ptid)
613 && !thread->stop_requested
614 && thread->suspend.stop_signal == TARGET_SIGNAL_0)
615 {
616 if (debug_infrun)
617 fprintf_unfiltered (gdb_stdlog,
618 "infrun: resuming vfork parent thread %s\n",
619 target_pid_to_str (thread->ptid));
620
621 switch_to_thread (thread->ptid);
622 clear_proceed_status ();
623 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
624 }
625
626 return 0;
627 }
628
629 /* Called whenever we notice an exec or exit event, to handle
630 detaching or resuming a vfork parent. */
631
632 static void
633 handle_vfork_child_exec_or_exit (int exec)
634 {
635 struct inferior *inf = current_inferior ();
636
637 if (inf->vfork_parent)
638 {
639 int resume_parent = -1;
640
641 /* This exec or exit marks the end of the shared memory region
642 between the parent and the child. If the user wanted to
643 detach from the parent, now is the time. */
644
645 if (inf->vfork_parent->pending_detach)
646 {
647 struct thread_info *tp;
648 struct cleanup *old_chain;
649 struct program_space *pspace;
650 struct address_space *aspace;
651
652 /* follow-fork child, detach-on-fork on. */
653
654 old_chain = make_cleanup_restore_current_thread ();
655
656 /* We're letting go of the parent. */
657 tp = any_live_thread_of_process (inf->vfork_parent->pid);
658 switch_to_thread (tp->ptid);
659
660 /* We're about to detach from the parent, which implicitly
661 removes breakpoints from its address space. There's a
662 catch here: we want to reuse the spaces for the child,
663 but, parent/child are still sharing the pspace at this
664 point, although the exec in reality makes the kernel give
665 the child a fresh set of new pages. The problem here is
666 that the breakpoints module, being unaware of this, would
667 likely choose the child process to write to the parent
668 address space. Swapping the child temporarily away from
669 the spaces has the desired effect. Yes, this is "sort
670 of" a hack. */
671
672 pspace = inf->pspace;
673 aspace = inf->aspace;
674 inf->aspace = NULL;
675 inf->pspace = NULL;
676
677 if (debug_infrun || info_verbose)
678 {
679 target_terminal_ours ();
680
681 if (exec)
682 fprintf_filtered (gdb_stdlog,
683 "Detaching vfork parent process "
684 "%d after child exec.\n",
685 inf->vfork_parent->pid);
686 else
687 fprintf_filtered (gdb_stdlog,
688 "Detaching vfork parent process "
689 "%d after child exit.\n",
690 inf->vfork_parent->pid);
691 }
692
693 target_detach (NULL, 0);
694
695 /* Put it back. */
696 inf->pspace = pspace;
697 inf->aspace = aspace;
698
699 do_cleanups (old_chain);
700 }
701 else if (exec)
702 {
703 /* We're staying attached to the parent, so, really give the
704 child a new address space. */
705 inf->pspace = add_program_space (maybe_new_address_space ());
706 inf->aspace = inf->pspace->aspace;
707 inf->removable = 1;
708 set_current_program_space (inf->pspace);
709
710 resume_parent = inf->vfork_parent->pid;
711
712 /* Break the bonds. */
713 inf->vfork_parent->vfork_child = NULL;
714 }
715 else
716 {
717 struct cleanup *old_chain;
718 struct program_space *pspace;
719
720 /* If this is a vfork child exiting, then the pspace and
721 aspaces were shared with the parent. Since we're
722 reporting the process exit, we'll be mourning all that is
723 found in the address space, and switching to null_ptid,
724 preparing to start a new inferior. But, since we don't
725 want to clobber the parent's address/program spaces, we
726 go ahead and create a new one for this exiting
727 inferior. */
728
729 /* Switch to null_ptid, so that clone_program_space doesn't want
730 to read the selected frame of a dead process. */
731 old_chain = save_inferior_ptid ();
732 inferior_ptid = null_ptid;
733
734 /* This inferior is dead, so avoid giving the breakpoints
735 module the option to write through to it (cloning a
736 program space resets breakpoints). */
737 inf->aspace = NULL;
738 inf->pspace = NULL;
739 pspace = add_program_space (maybe_new_address_space ());
740 set_current_program_space (pspace);
741 inf->removable = 1;
742 inf->symfile_flags = SYMFILE_NO_READ;
743 clone_program_space (pspace, inf->vfork_parent->pspace);
744 inf->pspace = pspace;
745 inf->aspace = pspace->aspace;
746
747 /* Put back inferior_ptid. We'll continue mourning this
748 inferior. */
749 do_cleanups (old_chain);
750
751 resume_parent = inf->vfork_parent->pid;
752 /* Break the bonds. */
753 inf->vfork_parent->vfork_child = NULL;
754 }
755
756 inf->vfork_parent = NULL;
757
758 gdb_assert (current_program_space == inf->pspace);
759
760 if (non_stop && resume_parent != -1)
761 {
762 /* If the user wanted the parent to be running, let it go
763 free now. */
764 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
765
766 if (debug_infrun)
767 fprintf_unfiltered (gdb_stdlog,
768 "infrun: resuming vfork parent process %d\n",
769 resume_parent);
770
771 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
772
773 do_cleanups (old_chain);
774 }
775 }
776 }
777
778 /* Enum strings for "set|show follow-exec-mode". */
779
780 static const char follow_exec_mode_new[] = "new";
781 static const char follow_exec_mode_same[] = "same";
782 static const char *const follow_exec_mode_names[] =
783 {
784 follow_exec_mode_new,
785 follow_exec_mode_same,
786 NULL,
787 };
788
789 static const char *follow_exec_mode_string = follow_exec_mode_same;
790 static void
791 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
792 struct cmd_list_element *c, const char *value)
793 {
794 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
795 }
796
797 /* EXECD_PATHNAME is assumed to be non-NULL. */
798
799 static void
800 follow_exec (ptid_t pid, char *execd_pathname)
801 {
802 struct thread_info *th = inferior_thread ();
803 struct inferior *inf = current_inferior ();
804
805 /* This is an exec event that we actually wish to pay attention to.
806 Refresh our symbol table to the newly exec'd program, remove any
807 momentary bp's, etc.
808
809 If there are breakpoints, they aren't really inserted now,
810 since the exec() transformed our inferior into a fresh set
811 of instructions.
812
813 We want to preserve symbolic breakpoints on the list, since
814 we have hopes that they can be reset after the new a.out's
815 symbol table is read.
816
817 However, any "raw" breakpoints must be removed from the list
818 (e.g., the solib bp's), since their address is probably invalid
819 now.
820
821 And, we DON'T want to call delete_breakpoints() here, since
822 that may write the bp's "shadow contents" (the instruction
823 value that was overwritten with a TRAP instruction). Since
824 we now have a new a.out, those shadow contents aren't valid. */
825
826 mark_breakpoints_out ();
827
828 update_breakpoints_after_exec ();
829
830 /* If there was one, it's gone now. We cannot truly step-to-next
831 statement through an exec(). */
832 th->control.step_resume_breakpoint = NULL;
833 th->control.exception_resume_breakpoint = NULL;
834 th->control.step_range_start = 0;
835 th->control.step_range_end = 0;
836
837 /* The target reports the exec event to the main thread, even if
838 some other thread does the exec, and even if the main thread was
839 already stopped --- if debugging in non-stop mode, it's possible
840 the user had the main thread held stopped in the previous image
841 --- release it now. This is the same behavior as step-over-exec
842 with scheduler-locking on in all-stop mode. */
843 th->stop_requested = 0;
844
845 /* What is this a.out's name? */
846 printf_unfiltered (_("%s is executing new program: %s\n"),
847 target_pid_to_str (inferior_ptid),
848 execd_pathname);
849
850 /* We've followed the inferior through an exec. Therefore, the
851 inferior has essentially been killed & reborn. */
852
853 gdb_flush (gdb_stdout);
854
855 breakpoint_init_inferior (inf_execd);
856
857 if (gdb_sysroot && *gdb_sysroot)
858 {
859 char *name = alloca (strlen (gdb_sysroot)
860 + strlen (execd_pathname)
861 + 1);
862
863 strcpy (name, gdb_sysroot);
864 strcat (name, execd_pathname);
865 execd_pathname = name;
866 }
867
868 /* Reset the shared library package. This ensures that we get a
869 shlib event when the child reaches "_start", at which point the
870 dld will have had a chance to initialize the child. */
871 /* Also, loading a symbol file below may trigger symbol lookups, and
872 we don't want those to be satisfied by the libraries of the
873 previous incarnation of this process. */
874 no_shared_libraries (NULL, 0);
875
876 if (follow_exec_mode_string == follow_exec_mode_new)
877 {
878 struct program_space *pspace;
879
880 /* The user wants to keep the old inferior and program spaces
881 around. Create a new fresh one, and switch to it. */
882
883 inf = add_inferior (current_inferior ()->pid);
884 pspace = add_program_space (maybe_new_address_space ());
885 inf->pspace = pspace;
886 inf->aspace = pspace->aspace;
887
888 exit_inferior_num_silent (current_inferior ()->num);
889
890 set_current_inferior (inf);
891 set_current_program_space (pspace);
892 }
893
894 gdb_assert (current_program_space == inf->pspace);
895
896 /* That a.out is now the one to use. */
897 exec_file_attach (execd_pathname, 0);
898
899 /* SYMFILE_DEFER_BP_RESET is used so that the proper displacement for a PIE
900 (Position Independent Executable) main symbol file will get applied by
901 solib_create_inferior_hook below. breakpoint_re_set would otherwise fail
902 to insert the breakpoints with the zero displacement. */
903
904 symbol_file_add (execd_pathname,
905 (inf->symfile_flags
906 | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
907 NULL, 0);
908
909 if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
910 set_initial_language ();
911
912 #ifdef SOLIB_CREATE_INFERIOR_HOOK
913 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
914 #else
915 solib_create_inferior_hook (0);
916 #endif
917
918 jit_inferior_created_hook ();
919
920 breakpoint_re_set ();
921
922 /* Reinsert all breakpoints. (Those which were symbolic have
923 been reset to the proper address in the new a.out, thanks
924 to symbol_file_command...). */
925 insert_breakpoints ();
926
927 /* The next resume of this inferior should bring it to the shlib
928 startup breakpoints. (If the user had also set bp's on
929 "main" from the old (parent) process, then they'll auto-
930 matically get reset there in the new process.). */
931 }
932
933 /* Non-zero if we are just simulating a single-step. This is needed
934 because we cannot remove the breakpoints in the inferior process
935 until after the `wait' in `wait_for_inferior'. */
936 static int singlestep_breakpoints_inserted_p = 0;
937
938 /* The thread we inserted single-step breakpoints for. */
939 static ptid_t singlestep_ptid;
940
941 /* PC when we started this single-step. */
942 static CORE_ADDR singlestep_pc;
943
944 /* If another thread hit the singlestep breakpoint, we save the original
945 thread here so that we can resume single-stepping it later. */
946 static ptid_t saved_singlestep_ptid;
947 static int stepping_past_singlestep_breakpoint;
948
949 /* If not equal to null_ptid, this means that after stepping over breakpoint
950 is finished, we need to switch to deferred_step_ptid, and step it.
951
952 The use case is when one thread has hit a breakpoint, and then the user
953 has switched to another thread and issued 'step'. We need to step over
954 breakpoint in the thread which hit the breakpoint, but then continue
955 stepping the thread user has selected. */
956 static ptid_t deferred_step_ptid;
957 \f
958 /* Displaced stepping. */
959
960 /* In non-stop debugging mode, we must take special care to manage
961 breakpoints properly; in particular, the traditional strategy for
962 stepping a thread past a breakpoint it has hit is unsuitable.
963 'Displaced stepping' is a tactic for stepping one thread past a
964 breakpoint it has hit while ensuring that other threads running
965 concurrently will hit the breakpoint as they should.
966
967 The traditional way to step a thread T off a breakpoint in a
968 multi-threaded program in all-stop mode is as follows:
969
970 a0) Initially, all threads are stopped, and breakpoints are not
971 inserted.
972 a1) We single-step T, leaving breakpoints uninserted.
973 a2) We insert breakpoints, and resume all threads.
974
975 In non-stop debugging, however, this strategy is unsuitable: we
976 don't want to have to stop all threads in the system in order to
977 continue or step T past a breakpoint. Instead, we use displaced
978 stepping:
979
980 n0) Initially, T is stopped, other threads are running, and
981 breakpoints are inserted.
982 n1) We copy the instruction "under" the breakpoint to a separate
983 location, outside the main code stream, making any adjustments
984 to the instruction, register, and memory state as directed by
985 T's architecture.
986 n2) We single-step T over the instruction at its new location.
987 n3) We adjust the resulting register and memory state as directed
988 by T's architecture. This includes resetting T's PC to point
989 back into the main instruction stream.
990 n4) We resume T.
991
992 This approach depends on the following gdbarch methods:
993
994 - gdbarch_max_insn_length and gdbarch_displaced_step_location
995 indicate where to copy the instruction, and how much space must
996 be reserved there. We use these in step n1.
997
998 - gdbarch_displaced_step_copy_insn copies an instruction to a new
999 address, and makes any necessary adjustments to the instruction,
1000 register contents, and memory. We use this in step n1.
1001
1002 - gdbarch_displaced_step_fixup adjusts registers and memory after
1003 we have successfully single-stepped the instruction, to yield the
1004 same effect the instruction would have had if we had executed it
1005 at its original address. We use this in step n3.
1006
1007 - gdbarch_displaced_step_free_closure provides cleanup.
1008
1009 The gdbarch_displaced_step_copy_insn and
1010 gdbarch_displaced_step_fixup functions must be written so that
1011 copying an instruction with gdbarch_displaced_step_copy_insn,
1012 single-stepping across the copied instruction, and then applying
1013 gdbarch_displaced_step_fixup should have the same effects on the
1014 thread's memory and registers as stepping the instruction in place
1015 would have. Exactly which responsibilities fall to the copy and
1016 which fall to the fixup is up to the author of those functions.
1017
1018 See the comments in gdbarch.sh for details.
1019
1020 Note that displaced stepping and software single-step cannot
1021 currently be used in combination, although with some care I think
1022 they could be made to. Software single-step works by placing
1023 breakpoints on all possible subsequent instructions; if the
1024 displaced instruction is a PC-relative jump, those breakpoints
1025 could fall in very strange places --- on pages that aren't
1026 executable, or at addresses that are not proper instruction
1027 boundaries. (We do generally let other threads run while we wait
1028 to hit the software single-step breakpoint, and they might
1029 encounter such a corrupted instruction.) One way to work around
1030 this would be to have gdbarch_displaced_step_copy_insn fully
1031 simulate the effect of PC-relative instructions (and return NULL)
1032 on architectures that use software single-stepping.
1033
1034 In non-stop mode, we can have independent and simultaneous step
1035 requests, so more than one thread may need to simultaneously step
1036 over a breakpoint. The current implementation assumes there is
1037 only one scratch space per process. In this case, we have to
1038 serialize access to the scratch space. If thread A wants to step
1039 over a breakpoint, but we are currently waiting for some other
1040 thread to complete a displaced step, we leave thread A stopped and
1041 place it in the displaced_step_request_queue. Whenever a displaced
1042 step finishes, we pick the next thread in the queue and start a new
1043 displaced step operation on it. See displaced_step_prepare and
1044 displaced_step_fixup for details. */
1045
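/* Illustrative sketch (editorial addition, not part of infrun.c): the
   n1-n4 steps above, condensed into straight-line form for a single
   thread whose registers are in REGCACHE.  The real logic lives in
   displaced_step_prepare and displaced_step_fixup below; the request
   queue, error handling, and the wait for the step to finish are
   omitted.  */
#if 0
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);
  ULONGEST len = gdbarch_max_insn_length (gdbarch);
  gdb_byte *saved = xmalloc (len);
  struct displaced_step_closure *closure;

  /* n1: save the scratch area, then copy and adjust the original
     instruction there.  */
  read_memory (copy, saved, len);
  closure = gdbarch_displaced_step_copy_insn (gdbarch, original, copy,
                                              regcache);

  /* n2: single-step the copy.  */
  regcache_write_pc (regcache, copy);
  target_resume (inferior_ptid, 1, TARGET_SIGNAL_0);
  /* ... wait for the step to report a stop ...  */

  /* n3: restore the scratch area and fix up registers/memory so the
     effect matches executing the instruction in place.  */
  write_memory (copy, saved, len);
  gdbarch_displaced_step_fixup (gdbarch, closure, original, copy, regcache);
  gdbarch_displaced_step_free_closure (gdbarch, closure);
  xfree (saved);

  /* n4: the thread can now be resumed in the main instruction stream.  */
}
#endif
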
1046 struct displaced_step_request
1047 {
1048 ptid_t ptid;
1049 struct displaced_step_request *next;
1050 };
1051
1052 /* Per-inferior displaced stepping state. */
1053 struct displaced_step_inferior_state
1054 {
1055 /* Pointer to next in linked list. */
1056 struct displaced_step_inferior_state *next;
1057
1058 /* The process this displaced step state refers to. */
1059 int pid;
1060
1061 /* A queue of pending displaced stepping requests. One entry per
1062 thread that needs to do a displaced step. */
1063 struct displaced_step_request *step_request_queue;
1064
1065 /* If this is not null_ptid, this is the thread carrying out a
1066 displaced single-step in process PID. This thread's state will
1067 require fixing up once it has completed its step. */
1068 ptid_t step_ptid;
1069
1070 /* The architecture the thread had when we stepped it. */
1071 struct gdbarch *step_gdbarch;
1072
1073 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1074 for post-step cleanup. */
1075 struct displaced_step_closure *step_closure;
1076
1077 /* The address of the original instruction, and the copy we
1078 made. */
1079 CORE_ADDR step_original, step_copy;
1080
1081 /* Saved contents of copy area. */
1082 gdb_byte *step_saved_copy;
1083 };
1084
1085 /* The list of states of processes involved in displaced stepping
1086 presently. */
1087 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1088
1089 /* Get the displaced stepping state of process PID. */
1090
1091 static struct displaced_step_inferior_state *
1092 get_displaced_stepping_state (int pid)
1093 {
1094 struct displaced_step_inferior_state *state;
1095
1096 for (state = displaced_step_inferior_states;
1097 state != NULL;
1098 state = state->next)
1099 if (state->pid == pid)
1100 return state;
1101
1102 return NULL;
1103 }
1104
1105 /* Add a new displaced stepping state for process PID to the displaced
1106 stepping state list, or return a pointer to an already existing
1107 entry, if it already exists. Never returns NULL. */
1108
1109 static struct displaced_step_inferior_state *
1110 add_displaced_stepping_state (int pid)
1111 {
1112 struct displaced_step_inferior_state *state;
1113
1114 for (state = displaced_step_inferior_states;
1115 state != NULL;
1116 state = state->next)
1117 if (state->pid == pid)
1118 return state;
1119
1120 state = xcalloc (1, sizeof (*state));
1121 state->pid = pid;
1122 state->next = displaced_step_inferior_states;
1123 displaced_step_inferior_states = state;
1124
1125 return state;
1126 }
1127
1128 /* If the inferior is performing a displaced step, and ADDR equals the
1129 starting address of the copy area, return the corresponding
1130 displaced_step_closure. Otherwise, return NULL. */
1131
1132 struct displaced_step_closure*
1133 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1134 {
1135 struct displaced_step_inferior_state *displaced
1136 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1137
1138 /* Check whether a displaced step is in progress and ADDR is the start of its copy area. */
1139 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1140 && (displaced->step_copy == addr))
1141 return displaced->step_closure;
1142
1143 return NULL;
1144 }
1145
1146 /* Remove the displaced stepping state of process PID. */
1147
1148 static void
1149 remove_displaced_stepping_state (int pid)
1150 {
1151 struct displaced_step_inferior_state *it, **prev_next_p;
1152
1153 gdb_assert (pid != 0);
1154
1155 it = displaced_step_inferior_states;
1156 prev_next_p = &displaced_step_inferior_states;
1157 while (it)
1158 {
1159 if (it->pid == pid)
1160 {
1161 *prev_next_p = it->next;
1162 xfree (it);
1163 return;
1164 }
1165
1166 prev_next_p = &it->next;
1167 it = *prev_next_p;
1168 }
1169 }
1170
1171 static void
1172 infrun_inferior_exit (struct inferior *inf)
1173 {
1174 remove_displaced_stepping_state (inf->pid);
1175 }
1176
1177 /* Enum strings for "set|show displaced-stepping". */
1178
1179 static const char can_use_displaced_stepping_auto[] = "auto";
1180 static const char can_use_displaced_stepping_on[] = "on";
1181 static const char can_use_displaced_stepping_off[] = "off";
1182 static const char *const can_use_displaced_stepping_enum[] =
1183 {
1184 can_use_displaced_stepping_auto,
1185 can_use_displaced_stepping_on,
1186 can_use_displaced_stepping_off,
1187 NULL,
1188 };
1189
1190 /* If ON, and the architecture supports it, GDB will use displaced
1191 stepping to step over breakpoints. If OFF, or if the architecture
1192 doesn't support it, GDB will instead use the traditional
1193 hold-and-step approach. If AUTO (which is the default), GDB will
1194 decide which technique to use to step over breakpoints depending on
1195 which of all-stop or non-stop mode is active --- displaced stepping
1196 in non-stop mode; hold-and-step in all-stop mode. */
1197
1198 static const char *can_use_displaced_stepping =
1199 can_use_displaced_stepping_auto;
1200
1201 static void
1202 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1203 struct cmd_list_element *c,
1204 const char *value)
1205 {
1206 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1207 fprintf_filtered (file,
1208 _("Debugger's willingness to use displaced stepping "
1209 "to step over breakpoints is %s (currently %s).\n"),
1210 value, non_stop ? "on" : "off");
1211 else
1212 fprintf_filtered (file,
1213 _("Debugger's willingness to use displaced stepping "
1214 "to step over breakpoints is %s.\n"), value);
1215 }
1216
1217 /* Return non-zero if displaced stepping can/should be used to step
1218 over breakpoints. */
1219
1220 static int
1221 use_displaced_stepping (struct gdbarch *gdbarch)
1222 {
1223 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1224 && non_stop)
1225 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1226 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1227 && !RECORD_IS_USED);
1228 }
1229
1230 /* Clean out any stray displaced stepping state. */
1231 static void
1232 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1233 {
1234 /* Indicate that there is no cleanup pending. */
1235 displaced->step_ptid = null_ptid;
1236
1237 if (displaced->step_closure)
1238 {
1239 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1240 displaced->step_closure);
1241 displaced->step_closure = NULL;
1242 }
1243 }
1244
1245 static void
1246 displaced_step_clear_cleanup (void *arg)
1247 {
1248 struct displaced_step_inferior_state *state = arg;
1249
1250 displaced_step_clear (state);
1251 }
1252
1253 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1254 void
1255 displaced_step_dump_bytes (struct ui_file *file,
1256 const gdb_byte *buf,
1257 size_t len)
1258 {
1259 int i;
1260
1261 for (i = 0; i < len; i++)
1262 fprintf_unfiltered (file, "%02x ", buf[i]);
1263 fputs_unfiltered ("\n", file);
1264 }
1265
1266 /* Prepare to single-step, using displaced stepping.
1267
1268 Note that we cannot use displaced stepping when we have a signal to
1269 deliver. If we have a signal to deliver and an instruction to step
1270 over, then after the step, there will be no indication from the
1271 target whether the thread entered a signal handler or ignored the
1272 signal and stepped over the instruction successfully --- both cases
1273 result in a simple SIGTRAP. In the first case we mustn't do a
1274 fixup, and in the second case we must --- but we can't tell which.
1275 Comments in the code for 'random signals' in handle_inferior_event
1276 explain how we handle this case instead.
1277
1278 Returns 1 if preparing was successful -- this thread is going to be
1279 stepped now; or 0 if displaced stepping this thread got queued. */
1280 static int
1281 displaced_step_prepare (ptid_t ptid)
1282 {
1283 struct cleanup *old_cleanups, *ignore_cleanups;
1284 struct regcache *regcache = get_thread_regcache (ptid);
1285 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1286 CORE_ADDR original, copy;
1287 ULONGEST len;
1288 struct displaced_step_closure *closure;
1289 struct displaced_step_inferior_state *displaced;
1290
1291 /* We should never reach this function if the architecture does not
1292 support displaced stepping. */
1293 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1294
1295 /* We have to displaced step one thread at a time, as we only have
1296 access to a single scratch space per inferior. */
1297
1298 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1299
1300 if (!ptid_equal (displaced->step_ptid, null_ptid))
1301 {
1302 /* Already waiting for a displaced step to finish. Defer this
1303 request and place in queue. */
1304 struct displaced_step_request *req, *new_req;
1305
1306 if (debug_displaced)
1307 fprintf_unfiltered (gdb_stdlog,
1308 "displaced: defering step of %s\n",
1309 target_pid_to_str (ptid));
1310
1311 new_req = xmalloc (sizeof (*new_req));
1312 new_req->ptid = ptid;
1313 new_req->next = NULL;
1314
1315 if (displaced->step_request_queue)
1316 {
1317 for (req = displaced->step_request_queue;
1318 req && req->next;
1319 req = req->next)
1320 ;
1321 req->next = new_req;
1322 }
1323 else
1324 displaced->step_request_queue = new_req;
1325
1326 return 0;
1327 }
1328 else
1329 {
1330 if (debug_displaced)
1331 fprintf_unfiltered (gdb_stdlog,
1332 "displaced: stepping %s now\n",
1333 target_pid_to_str (ptid));
1334 }
1335
1336 displaced_step_clear (displaced);
1337
1338 old_cleanups = save_inferior_ptid ();
1339 inferior_ptid = ptid;
1340
1341 original = regcache_read_pc (regcache);
1342
1343 copy = gdbarch_displaced_step_location (gdbarch);
1344 len = gdbarch_max_insn_length (gdbarch);
1345
1346 /* Save the original contents of the copy area. */
1347 displaced->step_saved_copy = xmalloc (len);
1348 ignore_cleanups = make_cleanup (free_current_contents,
1349 &displaced->step_saved_copy);
1350 read_memory (copy, displaced->step_saved_copy, len);
1351 if (debug_displaced)
1352 {
1353 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1354 paddress (gdbarch, copy));
1355 displaced_step_dump_bytes (gdb_stdlog,
1356 displaced->step_saved_copy,
1357 len);
1358 }
1359
1360 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1361 original, copy, regcache);
1362
1363 /* We don't support the fully-simulated case at present. */
1364 gdb_assert (closure);
1365
1366 /* Save the information we need to fix things up if the step
1367 succeeds. */
1368 displaced->step_ptid = ptid;
1369 displaced->step_gdbarch = gdbarch;
1370 displaced->step_closure = closure;
1371 displaced->step_original = original;
1372 displaced->step_copy = copy;
1373
1374 make_cleanup (displaced_step_clear_cleanup, displaced);
1375
1376 /* Resume execution at the copy. */
1377 regcache_write_pc (regcache, copy);
1378
1379 discard_cleanups (ignore_cleanups);
1380
1381 do_cleanups (old_cleanups);
1382
1383 if (debug_displaced)
1384 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1385 paddress (gdbarch, copy));
1386
1387 return 1;
1388 }
1389
1390 static void
1391 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1392 const gdb_byte *myaddr, int len)
1393 {
1394 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1395
1396 inferior_ptid = ptid;
1397 write_memory (memaddr, myaddr, len);
1398 do_cleanups (ptid_cleanup);
1399 }
1400
1401 /* Restore the contents of the copy area for thread PTID. */
1402
1403 static void
1404 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1405 ptid_t ptid)
1406 {
1407 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1408
1409 write_memory_ptid (ptid, displaced->step_copy,
1410 displaced->step_saved_copy, len);
1411 if (debug_displaced)
1412 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1413 target_pid_to_str (ptid),
1414 paddress (displaced->step_gdbarch,
1415 displaced->step_copy));
1416 }
1417
1418 static void
1419 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1420 {
1421 struct cleanup *old_cleanups;
1422 struct displaced_step_inferior_state *displaced
1423 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1424
1425 /* Was any thread of this process doing a displaced step? */
1426 if (displaced == NULL)
1427 return;
1428
1429 /* Was this event for the pid we displaced? */
1430 if (ptid_equal (displaced->step_ptid, null_ptid)
1431 || ! ptid_equal (displaced->step_ptid, event_ptid))
1432 return;
1433
1434 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1435
1436 displaced_step_restore (displaced, displaced->step_ptid);
1437
1438 /* Did the instruction complete successfully? */
1439 if (signal == TARGET_SIGNAL_TRAP)
1440 {
1441 /* Fix up the resulting state. */
1442 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1443 displaced->step_closure,
1444 displaced->step_original,
1445 displaced->step_copy,
1446 get_thread_regcache (displaced->step_ptid));
1447 }
1448 else
1449 {
1450 /* Since the instruction didn't complete, all we can do is
1451 relocate the PC. */
1452 struct regcache *regcache = get_thread_regcache (event_ptid);
1453 CORE_ADDR pc = regcache_read_pc (regcache);
1454
1455 pc = displaced->step_original + (pc - displaced->step_copy);
1456 regcache_write_pc (regcache, pc);
1457 }
1458
1459 do_cleanups (old_cleanups);
1460
1461 displaced->step_ptid = null_ptid;
1462
1463 /* Are there any pending displaced stepping requests? If so, run
1464 one now. Leave the state object around, since we're likely to
1465 need it again soon. */
1466 while (displaced->step_request_queue)
1467 {
1468 struct displaced_step_request *head;
1469 ptid_t ptid;
1470 struct regcache *regcache;
1471 struct gdbarch *gdbarch;
1472 CORE_ADDR actual_pc;
1473 struct address_space *aspace;
1474
1475 head = displaced->step_request_queue;
1476 ptid = head->ptid;
1477 displaced->step_request_queue = head->next;
1478 xfree (head);
1479
1480 context_switch (ptid);
1481
1482 regcache = get_thread_regcache (ptid);
1483 actual_pc = regcache_read_pc (regcache);
1484 aspace = get_regcache_aspace (regcache);
1485
1486 if (breakpoint_here_p (aspace, actual_pc))
1487 {
1488 if (debug_displaced)
1489 fprintf_unfiltered (gdb_stdlog,
1490 "displaced: stepping queued %s now\n",
1491 target_pid_to_str (ptid));
1492
1493 displaced_step_prepare (ptid);
1494
1495 gdbarch = get_regcache_arch (regcache);
1496
1497 if (debug_displaced)
1498 {
1499 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1500 gdb_byte buf[4];
1501
1502 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1503 paddress (gdbarch, actual_pc));
1504 read_memory (actual_pc, buf, sizeof (buf));
1505 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1506 }
1507
1508 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1509 displaced->step_closure))
1510 target_resume (ptid, 1, TARGET_SIGNAL_0);
1511 else
1512 target_resume (ptid, 0, TARGET_SIGNAL_0);
1513
1514 /* Done, we're stepping a thread. */
1515 break;
1516 }
1517 else
1518 {
1519 int step;
1520 struct thread_info *tp = inferior_thread ();
1521
1522 /* The breakpoint we were sitting under has since been
1523 removed. */
1524 tp->control.trap_expected = 0;
1525
1526 /* Go back to what we were trying to do. */
1527 step = currently_stepping (tp);
1528
1529 if (debug_displaced)
1530 fprintf_unfiltered (gdb_stdlog,
1531 "breakpoint is gone %s: step(%d)\n",
1532 target_pid_to_str (tp->ptid), step);
1533
1534 target_resume (ptid, step, TARGET_SIGNAL_0);
1535 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1536
1537 /* This request was discarded. See if there's any other
1538 thread waiting for its turn. */
1539 }
1540 }
1541 }
1542
1543 /* Update global variables holding ptids to hold NEW_PTID if they were
1544 holding OLD_PTID. */
1545 static void
1546 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1547 {
1548 struct displaced_step_request *it;
1549 struct displaced_step_inferior_state *displaced;
1550
1551 if (ptid_equal (inferior_ptid, old_ptid))
1552 inferior_ptid = new_ptid;
1553
1554 if (ptid_equal (singlestep_ptid, old_ptid))
1555 singlestep_ptid = new_ptid;
1556
1557 if (ptid_equal (deferred_step_ptid, old_ptid))
1558 deferred_step_ptid = new_ptid;
1559
1560 for (displaced = displaced_step_inferior_states;
1561 displaced;
1562 displaced = displaced->next)
1563 {
1564 if (ptid_equal (displaced->step_ptid, old_ptid))
1565 displaced->step_ptid = new_ptid;
1566
1567 for (it = displaced->step_request_queue; it; it = it->next)
1568 if (ptid_equal (it->ptid, old_ptid))
1569 it->ptid = new_ptid;
1570 }
1571 }
1572
1573 \f
1574 /* Resuming. */
1575
1576 /* Things to clean up if we QUIT out of resume (). */
1577 static void
1578 resume_cleanups (void *ignore)
1579 {
1580 normal_stop ();
1581 }
1582
1583 static const char schedlock_off[] = "off";
1584 static const char schedlock_on[] = "on";
1585 static const char schedlock_step[] = "step";
1586 static const char *const scheduler_enums[] = {
1587 schedlock_off,
1588 schedlock_on,
1589 schedlock_step,
1590 NULL
1591 };
1592 static const char *scheduler_mode = schedlock_off;
1593 static void
1594 show_scheduler_mode (struct ui_file *file, int from_tty,
1595 struct cmd_list_element *c, const char *value)
1596 {
1597 fprintf_filtered (file,
1598 _("Mode for locking scheduler "
1599 "during execution is \"%s\".\n"),
1600 value);
1601 }
1602
1603 static void
1604 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1605 {
1606 if (!target_can_lock_scheduler)
1607 {
1608 scheduler_mode = schedlock_off;
1609 error (_("Target '%s' cannot support this command."), target_shortname);
1610 }
1611 }
1612
1613 /* True if execution commands resume all threads of all processes by
1614 default; otherwise, resume only threads of the current inferior
1615 process. */
1616 int sched_multi = 0;
1617
1618 /* Try to set up software single-stepping over the specified location.
1619 Return 1 if target_resume() should use hardware single step.
1620
1621 GDBARCH the current gdbarch.
1622 PC the location to step over. */
1623
1624 static int
1625 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1626 {
1627 int hw_step = 1;
1628
1629 if (execution_direction == EXEC_FORWARD
1630 && gdbarch_software_single_step_p (gdbarch)
1631 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1632 {
1633 hw_step = 0;
1634 /* Do not pull these breakpoints until after a `wait' in
1635 `wait_for_inferior'. */
1636 singlestep_breakpoints_inserted_p = 1;
1637 singlestep_ptid = inferior_ptid;
1638 singlestep_pc = pc;
1639 }
1640 return hw_step;
1641 }
1642
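/* Illustrative sketch (editorial addition, not part of infrun.c): how
   a caller such as resume () below might use this helper.  STEP, PC,
   SIG and RESUME_PTID stand for the caller's locals.  If software
   single-step breakpoints were inserted, hardware stepping is skipped
   and the target simply continues into them.  */
#if 0
{
  int hw_step = step;

  if (hw_step)
    hw_step = maybe_software_singlestep (gdbarch, pc);

  target_resume (resume_ptid, hw_step, sig);
}
#endif
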
1643 /* Return a ptid representing the set of threads that we will proceed with,
1644 from the perspective of the user/frontend. We may actually resume
1645 fewer threads at first, e.g., if a thread is stopped at a
1646 breakpoint that needs stepping-off, but that should not be visible
1647 to the user/frontend, and neither should the frontend/user be
1648 allowed to proceed any of the threads that happen to be stopped for
1649 internal run control handling, if a previous command wanted them
1650 resumed. */
1651
1652 ptid_t
1653 user_visible_resume_ptid (int step)
1654 {
1655 /* By default, resume all threads of all processes. */
1656 ptid_t resume_ptid = RESUME_ALL;
1657
1658 /* Maybe resume only all threads of the current process. */
1659 if (!sched_multi && target_supports_multi_process ())
1660 {
1661 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1662 }
1663
1664 /* Maybe resume a single thread after all. */
1665 if (non_stop)
1666 {
1667 /* With non-stop mode on, threads are always handled
1668 individually. */
1669 resume_ptid = inferior_ptid;
1670 }
1671 else if ((scheduler_mode == schedlock_on)
1672 || (scheduler_mode == schedlock_step
1673 && (step || singlestep_breakpoints_inserted_p)))
1674 {
1675 /* User-settable 'scheduler' mode requires solo thread resume. */
1676 resume_ptid = inferior_ptid;
1677 }
1678
1679 return resume_ptid;
1680 }
1681
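/* Illustrative sketch (editorial addition, not part of infrun.c): a
   resume path could use the ptid returned above to mark the right set
   of threads as running for the frontend before asking the target to
   resume them.  STEP and SIG stand for the caller's locals.  */
#if 0
{
  ptid_t resume_ptid = user_visible_resume_ptid (step);

  set_running (resume_ptid, 1);
  target_resume (resume_ptid, step, sig);
}
#endif
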
1682 /* Resume the inferior, but allow a QUIT. This is useful if the user
1683 wants to interrupt some lengthy single-stepping operation
1684 (for child processes, the SIGINT goes to the inferior, and so
1685 we get a SIGINT random_signal, but for remote debugging and perhaps
1686 other targets, that's not true).
1687
1688 STEP nonzero if we should step (zero to continue instead).
1689 SIG is the signal to give the inferior (zero for none). */
1690 void
1691 resume (int step, enum target_signal sig)
1692 {
1693 int should_resume = 1;
1694 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1695 struct regcache *regcache = get_current_regcache ();
1696 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1697 struct thread_info *tp = inferior_thread ();
1698 CORE_ADDR pc = regcache_read_pc (regcache);
1699 struct address_space *aspace = get_regcache_aspace (regcache);
1700
1701 QUIT;
1702
1703 if (current_inferior ()->waiting_for_vfork_done)
1704 {
1705 /* Don't try to single-step a vfork parent that is waiting for
1706 the child to get out of the shared memory region (by exec'ing
1707 or exiting). This is particularly important on software
1708 single-step archs, as the child process would trip on the
1709 software single step breakpoint inserted for the parent
1710 process. Since the parent will not actually execute any
1711 instruction until the child is out of the shared region (such
1712 are vfork's semantics), it is safe to simply continue it.
1713 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1714 the parent, and tell it to `keep_going', which automatically
1715 re-sets it to stepping. */
1716 if (debug_infrun)
1717 fprintf_unfiltered (gdb_stdlog,
1718 "infrun: resume : clear step\n");
1719 step = 0;
1720 }
1721
1722 if (debug_infrun)
1723 fprintf_unfiltered (gdb_stdlog,
1724 "infrun: resume (step=%d, signal=%d), "
1725 "trap_expected=%d, current thread [%s] at %s\n",
1726 step, sig, tp->control.trap_expected,
1727 target_pid_to_str (inferior_ptid),
1728 paddress (gdbarch, pc));
1729
1730 /* Normally, by the time we reach `resume', the breakpoints are either
1731 removed or inserted, as appropriate. The exception is if we're sitting
1732 at a permanent breakpoint; we need to step over it, but permanent
1733 breakpoints can't be removed. So we have to test for it here. */
1734 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1735 {
1736 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1737 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1738 else
1739 error (_("\
1740 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1741 how to step past a permanent breakpoint on this architecture. Try using\n\
1742 a command like `return' or `jump' to continue execution."));
1743 }
1744
1745 /* If enabled, step over breakpoints by executing a copy of the
1746 instruction at a different address.
1747
1748 We can't use displaced stepping when we have a signal to deliver;
1749 the comments for displaced_step_prepare explain why. The
1750 comments in the handle_inferior event for dealing with 'random
1751 signals' explain what we do instead.
1752
1753 We can't use displaced stepping when we are waiting for a vfork_done
1754 event; displaced stepping would break the vfork child much like a
1755 software single-step breakpoint does. */
1756 if (use_displaced_stepping (gdbarch)
1757 && (tp->control.trap_expected
1758 || (step && gdbarch_software_single_step_p (gdbarch)))
1759 && sig == TARGET_SIGNAL_0
1760 && !current_inferior ()->waiting_for_vfork_done)
1761 {
1762 struct displaced_step_inferior_state *displaced;
1763
1764 if (!displaced_step_prepare (inferior_ptid))
1765 {
1766 /* Got placed in displaced stepping queue. Will be resumed
1767 later when all the currently queued displaced stepping
1768 requests finish. The thread is not executing at this point,
1769 and the call to set_executing will be made later. But we
1770 need to call set_running here, since from the frontend's point of view,
1771 the thread is running. */
1772 set_running (inferior_ptid, 1);
1773 discard_cleanups (old_cleanups);
1774 return;
1775 }
1776
1777 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1778 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1779 displaced->step_closure);
1780 }
1781
1782 /* Do we need to do it the hard way, w/temp breakpoints? */
1783 else if (step)
1784 step = maybe_software_singlestep (gdbarch, pc);
1785
1786 /* Currently, our software single-step implementation leads to different
1787 results than hardware single-stepping in one situation: when stepping
1788 into delivering a signal which has an associated signal handler,
1789 hardware single-step will stop at the first instruction of the handler,
1790 while software single-step will simply skip execution of the handler.
1791
1792 For now, this difference in behavior is accepted since there is no
1793 easy way to actually implement single-stepping into a signal handler
1794 without kernel support.
1795
1796 However, there is one scenario where this difference leads to follow-on
1797 problems: if we're stepping off a breakpoint by removing all breakpoints
1798 and then single-stepping. In this case, the software single-step
1799 behavior means that even if there is a *breakpoint* in the signal
1800 handler, GDB still would not stop.
1801
1802 Fortunately, we can at least fix this particular issue. We detect
1803 here the case where we are about to deliver a signal while software
1804 single-stepping with breakpoints removed. In this situation, we
1805 revert the decisions to remove all breakpoints and insert single-
1806 step breakpoints, and instead we install a step-resume breakpoint
1807 at the current address, deliver the signal without stepping, and
1808 once we arrive back at the step-resume breakpoint, actually step
1809 over the breakpoint we originally wanted to step over. */
1810 if (singlestep_breakpoints_inserted_p
1811 && tp->control.trap_expected && sig != TARGET_SIGNAL_0)
1812 {
1813 /* If we have nested signals or a pending signal is delivered
1814 immediately after a handler returns, we might already have
1815 a step-resume breakpoint set on the earlier handler. We cannot
1816 set another step-resume breakpoint; just continue on until the
1817 original breakpoint is hit. */
1818 if (tp->control.step_resume_breakpoint == NULL)
1819 {
1820 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1821 tp->step_after_step_resume_breakpoint = 1;
1822 }
1823
1824 remove_single_step_breakpoints ();
1825 singlestep_breakpoints_inserted_p = 0;
1826
1827 insert_breakpoints ();
1828 tp->control.trap_expected = 0;
1829 }
1830
1831 if (should_resume)
1832 {
1833 ptid_t resume_ptid;
1834
1835 /* If STEP is set, it's a request to use hardware stepping
1836 facilities. But in that case, we should never
1837 use singlestep breakpoint. */
1838 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1839
1840 /* Decide the set of threads to ask the target to resume. Start
1841 by assuming everything will be resumed, then narrow the set
1842 by applying increasingly restrictive conditions. */
1843 resume_ptid = user_visible_resume_ptid (step);
1844
1845 /* Maybe resume a single thread after all. */
1846 if (singlestep_breakpoints_inserted_p
1847 && stepping_past_singlestep_breakpoint)
1848 {
1849 /* The situation here is as follows. In thread T1 we wanted to
1850 single-step. Lacking hardware single-stepping we've
1851 set a breakpoint at the PC of the next instruction -- call it
1852 P. After resuming, we've hit that breakpoint in thread T2.
1853 Now we've removed the original breakpoint, inserted a breakpoint
1854 at P+1, and are trying to step to advance T2 past the breakpoint.
1855 We need to step only T2: if T1 is allowed to run freely,
1856 it can run past P, and if other threads are allowed to run,
1857 they can hit the breakpoint at P+1, and nested hits of single-step
1858 breakpoints are not something we'd want -- that's complicated
1859 to support, and has no value. */
1860 resume_ptid = inferior_ptid;
1861 }
1862 else if ((step || singlestep_breakpoints_inserted_p)
1863 && tp->control.trap_expected)
1864 {
1865 /* We're allowing a thread to run past a breakpoint it has
1866 hit, by single-stepping the thread with the breakpoint
1867 removed. In which case, we need to single-step only this
1868 thread, and keep others stopped, as they can miss this
1869 breakpoint if allowed to run.
1870
1871 The current code actually removes all breakpoints when
1872 doing this, not just the one being stepped over, so if we
1873 let other threads run, we can actually miss any
1874 breakpoint, not just the one at PC. */
1875 resume_ptid = inferior_ptid;
1876 }
1877
1878 if (gdbarch_cannot_step_breakpoint (gdbarch))
1879 {
1880 /* Most targets can step a breakpoint instruction, thus
1881 executing it normally. But if this one cannot, just
1882 continue and we will hit it anyway. */
1883 if (step && breakpoint_inserted_here_p (aspace, pc))
1884 step = 0;
1885 }
1886
1887 if (debug_displaced
1888 && use_displaced_stepping (gdbarch)
1889 && tp->control.trap_expected)
1890 {
1891 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1892 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1893 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1894 gdb_byte buf[4];
1895
1896 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1897 paddress (resume_gdbarch, actual_pc));
1898 read_memory (actual_pc, buf, sizeof (buf));
1899 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1900 }
1901
1902 /* Install inferior's terminal modes. */
1903 target_terminal_inferior ();
1904
1905 /* Avoid confusing the next resume, if the next stop/resume
1906 happens to apply to another thread. */
1907 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1908
1909 /* Advise target which signals may be handled silently. If we have
1910 removed breakpoints because we are stepping over one (which can
1911 happen only if we are not using displaced stepping), we need to
1912 receive all signals to avoid accidentally skipping a breakpoint
1913 during execution of a signal handler. */
1914 if ((step || singlestep_breakpoints_inserted_p)
1915 && tp->control.trap_expected
1916 && !use_displaced_stepping (gdbarch))
1917 target_pass_signals (0, NULL);
1918 else
1919 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
1920
1921 target_resume (resume_ptid, step, sig);
1922 }
1923
1924 discard_cleanups (old_cleanups);
1925 }
1926 \f
1927 /* Proceeding. */
1928
1929 /* Clear out all variables saying what to do when inferior is continued.
1930 First do this, then set the ones you want, then call `proceed'. */
1931
1932 static void
1933 clear_proceed_status_thread (struct thread_info *tp)
1934 {
1935 if (debug_infrun)
1936 fprintf_unfiltered (gdb_stdlog,
1937 "infrun: clear_proceed_status_thread (%s)\n",
1938 target_pid_to_str (tp->ptid));
1939
1940 tp->control.trap_expected = 0;
1941 tp->control.step_range_start = 0;
1942 tp->control.step_range_end = 0;
1943 tp->control.step_frame_id = null_frame_id;
1944 tp->control.step_stack_frame_id = null_frame_id;
1945 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
1946 tp->stop_requested = 0;
1947
1948 tp->control.stop_step = 0;
1949
1950 tp->control.proceed_to_finish = 0;
1951
1952 /* Discard any remaining commands or status from previous stop. */
1953 bpstat_clear (&tp->control.stop_bpstat);
1954 }
1955
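/* Callback for iterate_over_threads: clear the proceed status of TP,
   skipping threads that have already exited.  */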
1956 static int
1957 clear_proceed_status_callback (struct thread_info *tp, void *data)
1958 {
1959 if (is_exited (tp->ptid))
1960 return 0;
1961
1962 clear_proceed_status_thread (tp);
1963 return 0;
1964 }
1965
1966 void
1967 clear_proceed_status (void)
1968 {
1969 if (!non_stop)
1970 {
1971 /* In all-stop mode, delete the per-thread status of all
1972 threads, even if inferior_ptid is null_ptid, there may be
1973 threads on the list. E.g., we may be launching a new
1974 process, while selecting the executable. */
1975 iterate_over_threads (clear_proceed_status_callback, NULL);
1976 }
1977
1978 if (!ptid_equal (inferior_ptid, null_ptid))
1979 {
1980 struct inferior *inferior;
1981
1982 if (non_stop)
1983 {
1984 /* If in non-stop mode, only delete the per-thread status of
1985 the current thread. */
1986 clear_proceed_status_thread (inferior_thread ());
1987 }
1988
1989 inferior = current_inferior ();
1990 inferior->control.stop_soon = NO_STOP_QUIETLY;
1991 }
1992
1993 stop_after_trap = 0;
1994
1995 observer_notify_about_to_proceed ();
1996
1997 if (stop_registers)
1998 {
1999 regcache_xfree (stop_registers);
2000 stop_registers = NULL;
2001 }
2002 }
2003
2004 /* Check the current thread against the thread that reported the most recent
2005 event. If a step-over is required, return TRUE and set the current thread
2006 to the old thread. Otherwise return FALSE.
2007
2008 This should be suitable for any targets that support threads. */
2009
2010 static int
2011 prepare_to_proceed (int step)
2012 {
2013 ptid_t wait_ptid;
2014 struct target_waitstatus wait_status;
2015 int schedlock_enabled;
2016
2017 /* With non-stop mode on, threads are always handled individually. */
2018 gdb_assert (! non_stop);
2019
2020 /* Get the last target status returned by target_wait(). */
2021 get_last_target_status (&wait_ptid, &wait_status);
2022
2023 /* Make sure we were stopped at a breakpoint. */
2024 if (wait_status.kind != TARGET_WAITKIND_STOPPED
2025 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
2026 && wait_status.value.sig != TARGET_SIGNAL_ILL
2027 && wait_status.value.sig != TARGET_SIGNAL_SEGV
2028 && wait_status.value.sig != TARGET_SIGNAL_EMT))
2029 {
2030 return 0;
2031 }
2032
2033 schedlock_enabled = (scheduler_mode == schedlock_on
2034 || (scheduler_mode == schedlock_step
2035 && step));
2036
2037 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
2038 if (schedlock_enabled)
2039 return 0;
2040
2041 /* Don't switch over if we're about to resume some process
2042 other than WAIT_PTID's, and schedule-multiple is off. */
2043 if (!sched_multi
2044 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2045 return 0;
2046
2047 /* Switched over from WAIT_PTID. */
2048 if (!ptid_equal (wait_ptid, minus_one_ptid)
2049 && !ptid_equal (inferior_ptid, wait_ptid))
2050 {
2051 struct regcache *regcache = get_thread_regcache (wait_ptid);
2052
2053 if (breakpoint_here_p (get_regcache_aspace (regcache),
2054 regcache_read_pc (regcache)))
2055 {
2056 /* If stepping, remember current thread to switch back to. */
2057 if (step)
2058 deferred_step_ptid = inferior_ptid;
2059
2060 /* Switch back to the WAIT_PTID thread. */
2061 switch_to_thread (wait_ptid);
2062
2063 if (debug_infrun)
2064 fprintf_unfiltered (gdb_stdlog,
2065 "infrun: prepare_to_proceed (step=%d), "
2066 "switched to [%s]\n",
2067 step, target_pid_to_str (inferior_ptid));
2068
2069 /* We return 1 to indicate that there is a breakpoint here,
2070 so we need to step over it before continuing to avoid
2071 hitting it straight away. */
2072 return 1;
2073 }
2074 }
2075
2076 return 0;
2077 }
2078
2079 /* Basic routine for continuing the program in various fashions.
2080
2081 ADDR is the address to resume at, or -1 for resume where stopped.
2082 SIGGNAL is the signal to give it, or 0 for none,
2083 or -1 for act according to how it stopped.
2084 STEP is nonzero if we should trap after one instruction.
2085 -1 means return after that and print nothing.
2086 You should probably set various step_... variables
2087 before calling here, if you are stepping.
2088
2089 You should call clear_proceed_status before calling proceed. */
2090
2091 void
2092 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
2093 {
2094 struct regcache *regcache;
2095 struct gdbarch *gdbarch;
2096 struct thread_info *tp;
2097 CORE_ADDR pc;
2098 struct address_space *aspace;
2099 int oneproc = 0;
2100
2101 /* If we're stopped at a fork/vfork, follow the branch set by the
2102 "set follow-fork-mode" command; otherwise, we'll just proceed
2103 resuming the current thread. */
2104 if (!follow_fork ())
2105 {
2106 /* The target for some reason decided not to resume. */
2107 normal_stop ();
2108 if (target_can_async_p ())
2109 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2110 return;
2111 }
2112
2113 /* We'll update this if & when we switch to a new thread. */
2114 previous_inferior_ptid = inferior_ptid;
2115
2116 regcache = get_current_regcache ();
2117 gdbarch = get_regcache_arch (regcache);
2118 aspace = get_regcache_aspace (regcache);
2119 pc = regcache_read_pc (regcache);
2120
2121 if (step > 0)
2122 step_start_function = find_pc_function (pc);
2123 if (step < 0)
2124 stop_after_trap = 1;
2125
2126 if (addr == (CORE_ADDR) -1)
2127 {
2128 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2129 && execution_direction != EXEC_REVERSE)
2130 /* There is a breakpoint at the address we will resume at,
2131 step one instruction before inserting breakpoints so that
2132 we do not stop right away (and report a second hit at this
2133 breakpoint).
2134
2135 Note, we don't do this in reverse, because we won't
2136 actually be executing the breakpoint insn anyway.
2137 We'll be (un-)executing the previous instruction. */
2138
2139 oneproc = 1;
2140 else if (gdbarch_single_step_through_delay_p (gdbarch)
2141 && gdbarch_single_step_through_delay (gdbarch,
2142 get_current_frame ()))
2143 /* We stepped onto an instruction that needs to be stepped
2144 again before re-inserting the breakpoint, do so. */
2145 oneproc = 1;
2146 }
2147 else
2148 {
2149 regcache_write_pc (regcache, addr);
2150 }
2151
2152 if (debug_infrun)
2153 fprintf_unfiltered (gdb_stdlog,
2154 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
2155 paddress (gdbarch, addr), siggnal, step);
2156
2157 if (non_stop)
2158 /* In non-stop, each thread is handled individually. The context
2159 must already be set to the right thread here. */
2160 ;
2161 else
2162 {
2163 /* In a multi-threaded task we may select another thread and
2164 then continue or step.
2165
2166 But if the old thread was stopped at a breakpoint, it will
2167 immediately cause another breakpoint stop without any
2168 execution (i.e. it will report a breakpoint hit incorrectly).
2169 So we must step over it first.
2170
2171 prepare_to_proceed checks the current thread against the
2172 thread that reported the most recent event. If a step-over
2173 is required it returns TRUE and sets the current thread to
2174 the old thread. */
2175 if (prepare_to_proceed (step))
2176 oneproc = 1;
2177 }
2178
2179 /* prepare_to_proceed may change the current thread. */
2180 tp = inferior_thread ();
2181
2182 if (oneproc)
2183 {
2184 tp->control.trap_expected = 1;
2185 /* If displaced stepping is enabled, we can step over the
2186 breakpoint without hitting it, so leave all breakpoints
2187 inserted. Otherwise we need to disable all breakpoints, step
2188 one instruction, and then re-add them when that step is
2189 finished. */
2190 if (!use_displaced_stepping (gdbarch))
2191 remove_breakpoints ();
2192 }
2193
2194 /* We can insert breakpoints if we're not trying to step over one,
2195 or if we are stepping over one but we're using displaced stepping
2196 to do so. */
2197 if (! tp->control.trap_expected || use_displaced_stepping (gdbarch))
2198 insert_breakpoints ();
2199
2200 if (!non_stop)
2201 {
2202 /* Pass the last stop signal to the thread we're resuming,
2203 irrespective of whether the current thread is the thread that
2204 got the last event or not. This was historically GDB's
2205 behaviour before keeping a stop_signal per thread. */
2206
2207 struct thread_info *last_thread;
2208 ptid_t last_ptid;
2209 struct target_waitstatus last_status;
2210
2211 get_last_target_status (&last_ptid, &last_status);
2212 if (!ptid_equal (inferior_ptid, last_ptid)
2213 && !ptid_equal (last_ptid, null_ptid)
2214 && !ptid_equal (last_ptid, minus_one_ptid))
2215 {
2216 last_thread = find_thread_ptid (last_ptid);
2217 if (last_thread)
2218 {
2219 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2220 last_thread->suspend.stop_signal = TARGET_SIGNAL_0;
2221 }
2222 }
2223 }
2224
2225 if (siggnal != TARGET_SIGNAL_DEFAULT)
2226 tp->suspend.stop_signal = siggnal;
2227 /* If this signal should not be seen by program,
2228 give it zero. Used for debugging signals. */
2229 else if (!signal_program[tp->suspend.stop_signal])
2230 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2231
2232 annotate_starting ();
2233
2234 /* Make sure that output from GDB appears before output from the
2235 inferior. */
2236 gdb_flush (gdb_stdout);
2237
2238 /* Refresh prev_pc value just prior to resuming. This used to be
2239 done in stop_stepping, however, setting prev_pc there did not handle
2240 scenarios such as inferior function calls or returning from
2241 a function via the return command. In those cases, the prev_pc
2242 value was not set properly for subsequent commands. The prev_pc value
2243 is used to initialize the starting line number in the ecs. With an
2244 invalid value, the gdb next command ends up stopping at the position
2245 represented by the next line table entry past our start position.
2246 On platforms that generate one line table entry per line, this
2247 is not a problem. However, on the ia64, the compiler generates
2248 extraneous line table entries that do not increase the line number.
2249 When we issue the gdb next command on the ia64 after an inferior call
2250 or a return command, we often end up a few instructions forward, still
2251 within the original line we started in.
2252
2253 An attempt was made to refresh the prev_pc at the same time the
2254 execution_control_state is initialized (for instance, just before
2255 waiting for an inferior event). But this approach did not work
2256 because of platforms that use ptrace, where the pc register cannot
2257 be read unless the inferior is stopped. At that point, we are not
2258 guaranteed the inferior is stopped and so the regcache_read_pc() call
2259 can fail. Setting the prev_pc value here ensures the value is updated
2260 correctly when the inferior is stopped. */
2261 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2262
2263 /* Fill in with reasonable starting values. */
2264 init_thread_stepping_state (tp);
2265
2266 /* Reset to normal state. */
2267 init_infwait_state ();
2268
2269 /* Resume inferior. */
2270 resume (oneproc || step || bpstat_should_step (), tp->suspend.stop_signal);
2271
2272 /* Wait for it to stop (if not standalone)
2273 and in any case decode why it stopped, and act accordingly. */
2274 /* Do this only if we are not using the event loop, or if the target
2275 does not support asynchronous execution. */
2276 if (!target_can_async_p ())
2277 {
2278 wait_for_inferior ();
2279 normal_stop ();
2280 }
2281 }
2282 \f
2283
2284 /* Start remote-debugging of a machine over a serial link. */
2285
2286 void
2287 start_remote (int from_tty)
2288 {
2289 struct inferior *inferior;
2290
2291 inferior = current_inferior ();
2292 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2293
2294 /* Always go on waiting for the target, regardless of the mode. */
2295 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2296 indicate to wait_for_inferior that a target should timeout if
2297 nothing is returned (instead of just blocking). Because of this,
2298 targets expecting an immediate response need to, internally, set
2299 things up so that the target_wait() is forced to eventually
2300 timeout. */
2301 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2302 differentiate to its caller what the state of the target is after
2303 the initial open has been performed. Here we're assuming that
2304 the target has stopped. It should be possible to eventually have
2305 target_open() return to the caller an indication that the target
2306 is currently running and GDB state should be set to the same as
2307 for an async run. */
2308 wait_for_inferior ();
2309
2310 /* Now that the inferior has stopped, do any bookkeeping like
2311 loading shared libraries. We want to do this before normal_stop,
2312 so that the displayed frame is up to date. */
2313 post_create_inferior (&current_target, from_tty);
2314
2315 normal_stop ();
2316 }
2317
2318 /* Initialize static vars when a new inferior begins. */
2319
2320 void
2321 init_wait_for_inferior (void)
2322 {
2323 /* These are meaningless until the first time through wait_for_inferior. */
2324
2325 breakpoint_init_inferior (inf_starting);
2326
2327 clear_proceed_status ();
2328
2329 stepping_past_singlestep_breakpoint = 0;
2330 deferred_step_ptid = null_ptid;
2331
2332 target_last_wait_ptid = minus_one_ptid;
2333
2334 previous_inferior_ptid = inferior_ptid;
2335 init_infwait_state ();
2336
2337 /* Discard any skipped inlined frames. */
2338 clear_inline_frame_state (minus_one_ptid);
2339 }
2340
2341 \f
2342 /* This enum encodes possible reasons for doing a target_wait, so that
2343 wfi can call target_wait in one place. (Ultimately the call will be
2344 moved out of the infinite loop entirely.) */
2345
2346 enum infwait_states
2347 {
2348 infwait_normal_state,
2349 infwait_thread_hop_state,
2350 infwait_step_watch_state,
2351 infwait_nonstep_watch_state
2352 };
2353
2354 /* The PTID we'll do a target_wait on. */
2355 ptid_t waiton_ptid;
2356
2357 /* Current inferior wait state. */
2358 enum infwait_states infwait_state;
2359
2360 /* Data to be passed around while handling an event. This data is
2361 discarded between events. */
2362 struct execution_control_state
2363 {
2364 ptid_t ptid;
2365 /* The thread that got the event, if this was a thread event; NULL
2366 otherwise. */
2367 struct thread_info *event_thread;
2368
2369 struct target_waitstatus ws;
2370 int random_signal;
2371 int stop_func_filled_in;
2372 CORE_ADDR stop_func_start;
2373 CORE_ADDR stop_func_end;
2374 const char *stop_func_name;
2375 int new_thread_event;
2376 int wait_some_more;
2377 };
2378
2379 static void handle_inferior_event (struct execution_control_state *ecs);
2380
2381 static void handle_step_into_function (struct gdbarch *gdbarch,
2382 struct execution_control_state *ecs);
2383 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2384 struct execution_control_state *ecs);
2385 static void check_exception_resume (struct execution_control_state *,
2386 struct frame_info *, struct symbol *);
2387
2388 static void stop_stepping (struct execution_control_state *ecs);
2389 static void prepare_to_wait (struct execution_control_state *ecs);
2390 static void keep_going (struct execution_control_state *ecs);
2391
2392 /* Callback for iterate_over_threads. If the thread is stopped, but
2393 the user/frontend doesn't know about that yet, go through
2394 normal_stop, as if the thread had just stopped now. ARG points at
2395 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2396 ptid_is_pid(PTID) is true, applies to all threads of the process
2397 pointed at by PTID. Otherwise, apply only to the thread pointed at
2398 by PTID. */
2399
2400 static int
2401 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2402 {
2403 ptid_t ptid = * (ptid_t *) arg;
2404
2405 if ((ptid_equal (info->ptid, ptid)
2406 || ptid_equal (minus_one_ptid, ptid)
2407 || (ptid_is_pid (ptid)
2408 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2409 && is_running (info->ptid)
2410 && !is_executing (info->ptid))
2411 {
2412 struct cleanup *old_chain;
2413 struct execution_control_state ecss;
2414 struct execution_control_state *ecs = &ecss;
2415
2416 memset (ecs, 0, sizeof (*ecs));
2417
2418 old_chain = make_cleanup_restore_current_thread ();
2419
2420 switch_to_thread (info->ptid);
2421
2422 /* Go through handle_inferior_event/normal_stop, so we always
2423 have consistent output as if the stop event had been
2424 reported. */
2425 ecs->ptid = info->ptid;
2426 ecs->event_thread = find_thread_ptid (info->ptid);
2427 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2428 ecs->ws.value.sig = TARGET_SIGNAL_0;
2429
2430 handle_inferior_event (ecs);
2431
2432 if (!ecs->wait_some_more)
2433 {
2434 struct thread_info *tp;
2435
2436 normal_stop ();
2437
2438 /* Finish off the continuations. */
2439 tp = inferior_thread ();
2440 do_all_intermediate_continuations_thread (tp, 1);
2441 do_all_continuations_thread (tp, 1);
2442 }
2443
2444 do_cleanups (old_chain);
2445 }
2446
2447 return 0;
2448 }
2449
2450 /* This function is attached as a "thread_stop_requested" observer.
2451 Clean up local state that assumed the PTID was to be resumed, and
2452 report the stop to the frontend. */
2453
2454 static void
2455 infrun_thread_stop_requested (ptid_t ptid)
2456 {
2457 struct displaced_step_inferior_state *displaced;
2458
2459 /* PTID was requested to stop. Remove it from the displaced
2460 stepping queue, so we don't try to resume it automatically. */
2461
2462 for (displaced = displaced_step_inferior_states;
2463 displaced;
2464 displaced = displaced->next)
2465 {
2466 struct displaced_step_request *it, **prev_next_p;
2467
2468 it = displaced->step_request_queue;
2469 prev_next_p = &displaced->step_request_queue;
2470 while (it)
2471 {
2472 if (ptid_match (it->ptid, ptid))
2473 {
2474 *prev_next_p = it->next;
2475 it->next = NULL;
2476 xfree (it);
2477 }
2478 else
2479 {
2480 prev_next_p = &it->next;
2481 }
2482
2483 it = *prev_next_p;
2484 }
2485 }
2486
2487 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2488 }
2489
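/* If the exiting thread TP is the one whose last wait status we have
   cached, invalidate that cached ptid.  */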
2490 static void
2491 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2492 {
2493 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2494 nullify_last_target_wait_ptid ();
2495 }
2496
2497 /* Callback for iterate_over_threads. */
2498
2499 static int
2500 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2501 {
2502 if (is_exited (info->ptid))
2503 return 0;
2504
2505 delete_step_resume_breakpoint (info);
2506 delete_exception_resume_breakpoint (info);
2507 return 0;
2508 }
2509
2510 /* In all-stop, delete the step resume breakpoint of any thread that
2511 had one. In non-stop, delete the step resume breakpoint of the
2512 thread that just stopped. */
2513
2514 static void
2515 delete_step_thread_step_resume_breakpoint (void)
2516 {
2517 if (!target_has_execution
2518 || ptid_equal (inferior_ptid, null_ptid))
2519 /* If the inferior has exited, we have already deleted the step
2520 resume breakpoints out of GDB's lists. */
2521 return;
2522
2523 if (non_stop)
2524 {
2525 /* If in non-stop mode, only delete the step-resume or
2526 longjmp-resume breakpoint of the thread that just stopped
2527 stepping. */
2528 struct thread_info *tp = inferior_thread ();
2529
2530 delete_step_resume_breakpoint (tp);
2531 delete_exception_resume_breakpoint (tp);
2532 }
2533 else
2534 /* In all-stop mode, delete all step-resume and longjmp-resume
2535 breakpoints of any thread that had them. */
2536 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2537 }
2538
2539 /* A cleanup wrapper. */
2540
2541 static void
2542 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2543 {
2544 delete_step_thread_step_resume_breakpoint ();
2545 }
2546
2547 /* Pretty print the results of target_wait, for debugging purposes. */
2548
2549 static void
2550 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2551 const struct target_waitstatus *ws)
2552 {
2553 char *status_string = target_waitstatus_to_string (ws);
2554 struct ui_file *tmp_stream = mem_fileopen ();
2555 char *text;
2556
2557 /* The text is split over several lines because it was getting too long.
2558 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2559 output as a unit; we want only one timestamp printed if debug_timestamp
2560 is set. */
2561
2562 fprintf_unfiltered (tmp_stream,
2563 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2564 if (PIDGET (waiton_ptid) != -1)
2565 fprintf_unfiltered (tmp_stream,
2566 " [%s]", target_pid_to_str (waiton_ptid));
2567 fprintf_unfiltered (tmp_stream, ", status) =\n");
2568 fprintf_unfiltered (tmp_stream,
2569 "infrun: %d [%s],\n",
2570 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2571 fprintf_unfiltered (tmp_stream,
2572 "infrun: %s\n",
2573 status_string);
2574
2575 text = ui_file_xstrdup (tmp_stream, NULL);
2576
2577 /* This uses %s in part to handle %'s in the text, but also to avoid
2578 a gcc error: the format attribute requires a string literal. */
2579 fprintf_unfiltered (gdb_stdlog, "%s", text);
2580
2581 xfree (status_string);
2582 xfree (text);
2583 ui_file_delete (tmp_stream);
2584 }
2585
2586 /* Prepare and stabilize the inferior for detaching it. E.g.,
2587 detaching while a thread is displaced stepping is a recipe for
2588 crashing it, as nothing would readjust the PC out of the scratch
2589 pad. */
2590
2591 void
2592 prepare_for_detach (void)
2593 {
2594 struct inferior *inf = current_inferior ();
2595 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2596 struct cleanup *old_chain_1;
2597 struct displaced_step_inferior_state *displaced;
2598
2599 displaced = get_displaced_stepping_state (inf->pid);
2600
2601 /* Is any thread of this process displaced stepping? If not,
2602 there's nothing else to do. */
2603 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2604 return;
2605
2606 if (debug_infrun)
2607 fprintf_unfiltered (gdb_stdlog,
2608 "displaced-stepping in-process while detaching");
2609
2610 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2611 inf->detaching = 1;
2612
2613 while (!ptid_equal (displaced->step_ptid, null_ptid))
2614 {
2615 struct cleanup *old_chain_2;
2616 struct execution_control_state ecss;
2617 struct execution_control_state *ecs;
2618
2619 ecs = &ecss;
2620 memset (ecs, 0, sizeof (*ecs));
2621
2622 overlay_cache_invalid = 1;
2623
2624 if (deprecated_target_wait_hook)
2625 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2626 else
2627 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2628
2629 if (debug_infrun)
2630 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2631
2632 /* If an error happens while handling the event, propagate GDB's
2633 knowledge of the executing state to the frontend/user running
2634 state. */
2635 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2636 &minus_one_ptid);
2637
2638 /* In non-stop mode, each thread is handled individually.
2639 Switch early, so the global state is set correctly for this
2640 thread. */
2641 if (non_stop
2642 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2643 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2644 context_switch (ecs->ptid);
2645
2646 /* Now figure out what to do with the result. */
2647 handle_inferior_event (ecs);
2648
2649 /* No error, don't finish the state yet. */
2650 discard_cleanups (old_chain_2);
2651
2652 /* Breakpoints and watchpoints are not installed on the target
2653 at this point, and signals are passed directly to the
2654 inferior, so this must mean the process is gone. */
2655 if (!ecs->wait_some_more)
2656 {
2657 discard_cleanups (old_chain_1);
2658 error (_("Program exited while detaching"));
2659 }
2660 }
2661
2662 discard_cleanups (old_chain_1);
2663 }
2664
2665 /* Wait for control to return from inferior to debugger.
2666
2667 If inferior gets a signal, we may decide to start it up again
2668 instead of returning. That is why there is a loop in this function.
2669 When this function actually returns it means the inferior
2670 should be left stopped and GDB should read more commands. */
2671
2672 void
2673 wait_for_inferior (void)
2674 {
2675 struct cleanup *old_cleanups;
2676 struct execution_control_state ecss;
2677 struct execution_control_state *ecs;
2678
2679 if (debug_infrun)
2680 fprintf_unfiltered
2681 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2682
2683 old_cleanups =
2684 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2685
2686 ecs = &ecss;
2687 memset (ecs, 0, sizeof (*ecs));
2688
2689 while (1)
2690 {
2691 struct cleanup *old_chain;
2692
2693 overlay_cache_invalid = 1;
2694
2695 if (deprecated_target_wait_hook)
2696 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2697 else
2698 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2699
2700 if (debug_infrun)
2701 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2702
2703 /* If an error happens while handling the event, propagate GDB's
2704 knowledge of the executing state to the frontend/user running
2705 state. */
2706 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2707
2708 /* Now figure out what to do with the result. */
2709 handle_inferior_event (ecs);
2710
2711 /* No error, don't finish the state yet. */
2712 discard_cleanups (old_chain);
2713
2714 if (!ecs->wait_some_more)
2715 break;
2716 }
2717
2718 do_cleanups (old_cleanups);
2719 }
2720
2721 /* Asynchronous version of wait_for_inferior. It is called by the
2722 event loop whenever a change of state is detected on the file
2723 descriptor corresponding to the target. It can be called more than
2724 once to complete a single execution command. In such cases we need
2725 to keep the state in a global variable ECSS. If it is the last time
2726 that this function is called for a single execution command, then
2727 report to the user that the inferior has stopped, and do the
2728 necessary cleanups. */
2729
2730 void
2731 fetch_inferior_event (void *client_data)
2732 {
2733 struct execution_control_state ecss;
2734 struct execution_control_state *ecs = &ecss;
2735 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2736 struct cleanup *ts_old_chain;
2737 int was_sync = sync_execution;
2738 int cmd_done = 0;
2739
2740 memset (ecs, 0, sizeof (*ecs));
2741
2742 /* We're handling a live event, so make sure we're doing live
2743 debugging. If we're looking at traceframes while the target is
2744 running, we're going to need to get back to that mode after
2745 handling the event. */
2746 if (non_stop)
2747 {
2748 make_cleanup_restore_current_traceframe ();
2749 set_current_traceframe (-1);
2750 }
2751
2752 if (non_stop)
2753 /* In non-stop mode, the user/frontend should not notice a thread
2754 switch due to internal events. Make sure we reverse to the
2755 user selected thread and frame after handling the event and
2756 running any breakpoint commands. */
2757 make_cleanup_restore_current_thread ();
2758
2759 overlay_cache_invalid = 1;
2760
2761 make_cleanup_restore_integer (&execution_direction);
2762 execution_direction = target_execution_direction ();
2763
2764 if (deprecated_target_wait_hook)
2765 ecs->ptid =
2766 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2767 else
2768 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2769
2770 if (debug_infrun)
2771 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2772
2773 if (non_stop
2774 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2775 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2776 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2777 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2778 /* In non-stop mode, each thread is handled individually. Switch
2779 early, so the global state is set correctly for this
2780 thread. */
2781 context_switch (ecs->ptid);
2782
2783 /* If an error happens while handling the event, propagate GDB's
2784 knowledge of the executing state to the frontend/user running
2785 state. */
2786 if (!non_stop)
2787 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2788 else
2789 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2790
2791 /* Registered after make_cleanup_restore_current_thread above, so this
2792 runs first and still applies to the thread that threw the exception. */
2793 make_bpstat_clear_actions_cleanup ();
2794
2795 /* Now figure out what to do with the result. */
2796 handle_inferior_event (ecs);
2797
2798 if (!ecs->wait_some_more)
2799 {
2800 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2801
2802 delete_step_thread_step_resume_breakpoint ();
2803
2804 /* We may not find an inferior if this was a process exit. */
2805 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2806 normal_stop ();
2807
2808 if (target_has_execution
2809 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
2810 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2811 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2812 && ecs->event_thread->step_multi
2813 && ecs->event_thread->control.stop_step)
2814 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2815 else
2816 {
2817 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2818 cmd_done = 1;
2819 }
2820 }
2821
2822 /* No error, don't finish the thread states yet. */
2823 discard_cleanups (ts_old_chain);
2824
2825 /* Revert thread and frame. */
2826 do_cleanups (old_chain);
2827
2828 /* If the inferior was in sync execution mode, and now isn't,
2829 restore the prompt (a synchronous execution command has finished,
2830 and we're ready for input). */
2831 if (interpreter_async && was_sync && !sync_execution)
2832 display_gdb_prompt (0);
2833
2834 if (cmd_done
2835 && !was_sync
2836 && exec_done_display_p
2837 && (ptid_equal (inferior_ptid, null_ptid)
2838 || !is_running (inferior_ptid)))
2839 printf_unfiltered (_("completed.\n"));
2840 }
2841
2842 /* Record the frame and location we're currently stepping through. */
2843 void
2844 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2845 {
2846 struct thread_info *tp = inferior_thread ();
2847
2848 tp->control.step_frame_id = get_frame_id (frame);
2849 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2850
2851 tp->current_symtab = sal.symtab;
2852 tp->current_line = sal.line;
2853 }
2854
2855 /* Clear context switchable stepping state. */
2856
2857 void
2858 init_thread_stepping_state (struct thread_info *tss)
2859 {
2860 tss->stepping_over_breakpoint = 0;
2861 tss->step_after_step_resume_breakpoint = 0;
2862 }
2863
2864 /* Return the cached copy of the last pid/waitstatus returned by
2865 target_wait()/deprecated_target_wait_hook(). The data is actually
2866 cached by handle_inferior_event(), which gets called immediately
2867 after target_wait()/deprecated_target_wait_hook(). */
2868
2869 void
2870 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2871 {
2872 *ptidp = target_last_wait_ptid;
2873 *status = target_last_waitstatus;
2874 }
2875
2876 void
2877 nullify_last_target_wait_ptid (void)
2878 {
2879 target_last_wait_ptid = minus_one_ptid;
2880 }
2881
2882 /* Switch thread contexts. */
2883
2884 static void
2885 context_switch (ptid_t ptid)
2886 {
2887 if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
2888 {
2889 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2890 target_pid_to_str (inferior_ptid));
2891 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2892 target_pid_to_str (ptid));
2893 }
2894
2895 switch_to_thread (ptid);
2896 }
2897
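/* If the stop reported in ECS was caused by a software breakpoint on a
   target that leaves the PC past the breakpoint instruction
   (gdbarch_decr_pc_after_break), rewind the PC back to the breakpoint
   address.  */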
2898 static void
2899 adjust_pc_after_break (struct execution_control_state *ecs)
2900 {
2901 struct regcache *regcache;
2902 struct gdbarch *gdbarch;
2903 struct address_space *aspace;
2904 CORE_ADDR breakpoint_pc;
2905
2906 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2907 we aren't, just return.
2908
2909 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2910 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2911 implemented by software breakpoints should be handled through the normal
2912 breakpoint layer.
2913
2914 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2915 different signals (SIGILL or SIGEMT for instance), but it is less
2916 clear where the PC is pointing afterwards. It may not match
2917 gdbarch_decr_pc_after_break. I don't know any specific target that
2918 generates these signals at breakpoints (the code has been in GDB since at
2919 least 1992) so I cannot guess how to handle them here.
2920
2921 In earlier versions of GDB, a target with
2922 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2923 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2924 target with both of these set in GDB history, and it seems unlikely to be
2925 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2926
2927 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2928 return;
2929
2930 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2931 return;
2932
2933 /* In reverse execution, when a breakpoint is hit, the instruction
2934 under it has already been de-executed. The reported PC always
2935 points at the breakpoint address, so adjusting it further would
2936 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2937 architecture:
2938
2939 B1 0x08000000 : INSN1
2940 B2 0x08000001 : INSN2
2941 0x08000002 : INSN3
2942 PC -> 0x08000003 : INSN4
2943
2944 Say you're stopped at 0x08000003 as above. Reverse continuing
2945 from that point should hit B2 as below. Reading the PC when the
2946 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2947 been de-executed already.
2948
2949 B1 0x08000000 : INSN1
2950 B2 PC -> 0x08000001 : INSN2
2951 0x08000002 : INSN3
2952 0x08000003 : INSN4
2953
2954 We can't apply the same logic as for forward execution, because
2955 we would wrongly adjust the PC to 0x08000000, since there's a
2956 breakpoint at PC - 1. We'd then report a hit on B1, although
2957 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2958 behaviour. */
2959 if (execution_direction == EXEC_REVERSE)
2960 return;
2961
2962 /* If this target does not decrement the PC after breakpoints, then
2963 we have nothing to do. */
2964 regcache = get_thread_regcache (ecs->ptid);
2965 gdbarch = get_regcache_arch (regcache);
2966 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2967 return;
2968
2969 aspace = get_regcache_aspace (regcache);
2970
2971 /* Find the location where (if we've hit a breakpoint) the
2972 breakpoint would be. */
2973 breakpoint_pc = regcache_read_pc (regcache)
2974 - gdbarch_decr_pc_after_break (gdbarch);
2975
2976 /* Check whether there actually is a software breakpoint inserted at
2977 that location.
2978
2979 If in non-stop mode, a race condition is possible where we've
2980 removed a breakpoint, but stop events for that breakpoint were
2981 already queued and arrive later. To suppress those spurious
2982 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2983 and retire them after a number of stop events are reported. */
2984 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2985 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2986 {
2987 struct cleanup *old_cleanups = NULL;
2988
2989 if (RECORD_IS_USED)
2990 old_cleanups = record_gdb_operation_disable_set ();
2991
2992 /* When using hardware single-step, a SIGTRAP is reported for both
2993 a completed single-step and a software breakpoint. Need to
2994 differentiate between the two, as the latter needs adjusting
2995 but the former does not.
2996
2997 The SIGTRAP can be due to a completed hardware single-step only if
2998 - we didn't insert software single-step breakpoints
2999 - the thread to be examined is still the current thread
3000 - this thread is currently being stepped
3001
3002 If any of these events did not occur, we must have stopped due
3003 to hitting a software breakpoint, and have to back up to the
3004 breakpoint address.
3005
3006 As a special case, we could have hardware single-stepped a
3007 software breakpoint. In this case (prev_pc == breakpoint_pc),
3008 we also need to back up to the breakpoint address. */
3009
3010 if (singlestep_breakpoints_inserted_p
3011 || !ptid_equal (ecs->ptid, inferior_ptid)
3012 || !currently_stepping (ecs->event_thread)
3013 || ecs->event_thread->prev_pc == breakpoint_pc)
3014 regcache_write_pc (regcache, breakpoint_pc);
3015
3016 if (RECORD_IS_USED)
3017 do_cleanups (old_cleanups);
3018 }
3019 }
3020
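/* Reset the inferior-wait state: wait on any ptid, in the normal
   state.  */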
3021 void
3022 init_infwait_state (void)
3023 {
3024 waiton_ptid = pid_to_ptid (-1);
3025 infwait_state = infwait_normal_state;
3026 }
3027
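/* Report the error used when a command requires a stopped thread but
   the selected thread is running.  */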
3028 void
3029 error_is_running (void)
3030 {
3031 error (_("Cannot execute this command while "
3032 "the selected thread is running."));
3033 }
3034
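/* Error out if the currently selected thread is running.  */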
3035 void
3036 ensure_not_running (void)
3037 {
3038 if (is_running (inferior_ptid))
3039 error_is_running ();
3040 }
3041
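/* Return non-zero if the frame identified by STEP_FRAME_ID is one of
   the (possibly inlined) callers of FRAME; stop the search after
   checking the first caller that is not an inlined frame.  */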
3042 static int
3043 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3044 {
3045 for (frame = get_prev_frame (frame);
3046 frame != NULL;
3047 frame = get_prev_frame (frame))
3048 {
3049 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3050 return 1;
3051 if (get_frame_type (frame) != INLINE_FRAME)
3052 break;
3053 }
3054
3055 return 0;
3056 }
3057
3058 /* Auxiliary function that handles syscall entry/return events.
3059 It returns 1 if the inferior should keep going (and GDB
3060 should ignore the event), or 0 if the event deserves to be
3061 processed. */
3062
3063 static int
3064 handle_syscall_event (struct execution_control_state *ecs)
3065 {
3066 struct regcache *regcache;
3067 struct gdbarch *gdbarch;
3068 int syscall_number;
3069
3070 if (!ptid_equal (ecs->ptid, inferior_ptid))
3071 context_switch (ecs->ptid);
3072
3073 regcache = get_thread_regcache (ecs->ptid);
3074 gdbarch = get_regcache_arch (regcache);
3075 syscall_number = ecs->ws.value.syscall_number;
3076 stop_pc = regcache_read_pc (regcache);
3077
3078 if (catch_syscall_enabled () > 0
3079 && catching_syscall_number (syscall_number) > 0)
3080 {
3081 if (debug_infrun)
3082 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3083 syscall_number);
3084
3085 ecs->event_thread->control.stop_bpstat
3086 = bpstat_stop_status (get_regcache_aspace (regcache),
3087 stop_pc, ecs->ptid, &ecs->ws);
3088 ecs->random_signal
3089 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3090
3091 if (!ecs->random_signal)
3092 {
3093 /* Catchpoint hit. */
3094 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3095 return 0;
3096 }
3097 }
3098
3099 /* If no catchpoint triggered for this, then keep going. */
3100 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3101 keep_going (ecs);
3102 return 1;
3103 }
3104
3105 /* Clear the supplied execution_control_state's stop_func_* fields. */
3106
3107 static void
3108 clear_stop_func (struct execution_control_state *ecs)
3109 {
3110 ecs->stop_func_filled_in = 0;
3111 ecs->stop_func_start = 0;
3112 ecs->stop_func_end = 0;
3113 ecs->stop_func_name = NULL;
3114 }
3115
3116 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3117
3118 static void
3119 fill_in_stop_func (struct gdbarch *gdbarch,
3120 struct execution_control_state *ecs)
3121 {
3122 if (!ecs->stop_func_filled_in)
3123 {
3124 /* Don't care about return value; stop_func_start and stop_func_name
3125 will both be 0 if it doesn't work. */
3126 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3127 &ecs->stop_func_start, &ecs->stop_func_end);
3128 ecs->stop_func_start
3129 += gdbarch_deprecated_function_start_offset (gdbarch);
3130
3131 ecs->stop_func_filled_in = 1;
3132 }
3133 }
3134
3135 /* Given an execution control state that has been freshly filled in
3136 by an event from the inferior, figure out what it means and take
3137 appropriate action. */
3138
3139 static void
3140 handle_inferior_event (struct execution_control_state *ecs)
3141 {
3142 struct frame_info *frame;
3143 struct gdbarch *gdbarch;
3144 int stopped_by_watchpoint;
3145 int stepped_after_stopped_by_watchpoint = 0;
3146 struct symtab_and_line stop_pc_sal;
3147 enum stop_kind stop_soon;
3148
3149 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3150 {
3151 /* We had an event in the inferior, but we are not interested in
3152 handling it at this level. The lower layers have already
3153 done what needs to be done, if anything.
3154
3155 One of the possible circumstances for this is when the
3156 inferior produces output for the console. The inferior has
3157 not stopped, and we are ignoring the event. Another possible
3158 circumstance is any event which the lower level knows will be
3159 reported multiple times without an intervening resume. */
3160 if (debug_infrun)
3161 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3162 prepare_to_wait (ecs);
3163 return;
3164 }
3165
3166 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
3167 && target_can_async_p () && !sync_execution)
3168 {
3169 /* There were no unwaited-for children left in the target, but,
3170 we're not synchronously waiting for events either. Just
3171 ignore. Otherwise, if we were running a synchronous
3172 execution command, we need to cancel it and give the user
3173 back the terminal. */
3174 if (debug_infrun)
3175 fprintf_unfiltered (gdb_stdlog,
3176 "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
3177 prepare_to_wait (ecs);
3178 return;
3179 }
3180
3181 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3182 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3183 && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED)
3184 {
3185 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3186
3187 gdb_assert (inf);
3188 stop_soon = inf->control.stop_soon;
3189 }
3190 else
3191 stop_soon = NO_STOP_QUIETLY;
3192
3193 /* Cache the last pid/waitstatus. */
3194 target_last_wait_ptid = ecs->ptid;
3195 target_last_waitstatus = ecs->ws;
3196
3197 /* Always clear state belonging to the previous time we stopped. */
3198 stop_stack_dummy = STOP_NONE;
3199
3200 if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
3201 {
3202 /* No unwaited-for children left. IOW, all resumed children
3203 have exited. */
3204 if (debug_infrun)
3205 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");
3206
3207 stop_print_frame = 0;
3208 stop_stepping (ecs);
3209 return;
3210 }
3211
3212 /* If it's a new process, add it to the thread database. */
3213
3214 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
3215 && !ptid_equal (ecs->ptid, minus_one_ptid)
3216 && !in_thread_list (ecs->ptid));
3217
3218 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3219 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
3220 add_thread (ecs->ptid);
3221
3222 ecs->event_thread = find_thread_ptid (ecs->ptid);
3223
3224 /* Dependent on valid ECS->EVENT_THREAD. */
3225 adjust_pc_after_break (ecs);
3226
3227 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3228 reinit_frame_cache ();
3229
3230 breakpoint_retire_moribund ();
3231
3232 /* First, distinguish signals caused by the debugger from signals
3233 that have to do with the program's own actions. Note that
3234 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3235 on the operating system version. Here we detect when a SIGILL or
3236 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3237 something similar for SIGSEGV, since a SIGSEGV will be generated
3238 when we're trying to execute a breakpoint instruction on a
3239 non-executable stack. This happens for call dummy breakpoints
3240 for architectures like SPARC that place call dummies on the
3241 stack. */
3242 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3243 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3244 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3245 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3246 {
3247 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3248
3249 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3250 regcache_read_pc (regcache)))
3251 {
3252 if (debug_infrun)
3253 fprintf_unfiltered (gdb_stdlog,
3254 "infrun: Treating signal as SIGTRAP\n");
3255 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3256 }
3257 }
3258
3259 /* Mark the non-executing threads accordingly. In all-stop, all
3260 threads of all processes are stopped when we get any event
3261 reported. In non-stop mode, only the event thread stops. If
3262 we're handling a process exit in non-stop mode, there's nothing
3263 to do, as threads of the dead process are gone, and threads of
3264 any other process were left running. */
3265 if (!non_stop)
3266 set_executing (minus_one_ptid, 0);
3267 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3268 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3269 set_executing (ecs->ptid, 0);
3270
3271 switch (infwait_state)
3272 {
3273 case infwait_thread_hop_state:
3274 if (debug_infrun)
3275 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3276 break;
3277
3278 case infwait_normal_state:
3279 if (debug_infrun)
3280 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3281 break;
3282
3283 case infwait_step_watch_state:
3284 if (debug_infrun)
3285 fprintf_unfiltered (gdb_stdlog,
3286 "infrun: infwait_step_watch_state\n");
3287
3288 stepped_after_stopped_by_watchpoint = 1;
3289 break;
3290
3291 case infwait_nonstep_watch_state:
3292 if (debug_infrun)
3293 fprintf_unfiltered (gdb_stdlog,
3294 "infrun: infwait_nonstep_watch_state\n");
3295 insert_breakpoints ();
3296
3297 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3298 handle things like signals arriving and other things happening
3299 in combination correctly? */
3300 stepped_after_stopped_by_watchpoint = 1;
3301 break;
3302
3303 default:
3304 internal_error (__FILE__, __LINE__, _("bad switch"));
3305 }
3306
3307 infwait_state = infwait_normal_state;
3308 waiton_ptid = pid_to_ptid (-1);
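/* Any special wait state has been accounted for above; now dispatch
on the kind of event the target reported. */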
3309
3310 switch (ecs->ws.kind)
3311 {
3312 case TARGET_WAITKIND_LOADED:
3313 if (debug_infrun)
3314 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3315 /* Ignore gracefully during startup of the inferior, as it might
3316 be the shell which has just loaded some objects; otherwise
3317 add the symbols for the newly loaded objects. Also ignore at
3318 the beginning of an attach or remote session; we will query
3319 the full list of libraries once the connection is
3320 established. */
3321 if (stop_soon == NO_STOP_QUIETLY)
3322 {
3323 struct regcache *regcache;
3324
3325 if (!ptid_equal (ecs->ptid, inferior_ptid))
3326 context_switch (ecs->ptid);
3327 regcache = get_thread_regcache (ecs->ptid);
3328
3329 handle_solib_event ();
3330
3331 ecs->event_thread->control.stop_bpstat
3332 = bpstat_stop_status (get_regcache_aspace (regcache),
3333 stop_pc, ecs->ptid, &ecs->ws);
3334 ecs->random_signal
3335 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3336
3337 if (!ecs->random_signal)
3338 {
3339 /* A catchpoint triggered. */
3340 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3341 goto process_event_stop_test;
3342 }
3343
3344 /* If requested, stop when the dynamic linker notifies
3345 gdb of events. This allows the user to get control
3346 and place breakpoints in initializer routines for
3347 dynamically loaded objects (among other things). */
3348 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3349 if (stop_on_solib_events)
3350 {
3351 /* Make sure we print "Stopped due to solib-event" in
3352 normal_stop. */
3353 stop_print_frame = 1;
3354
3355 stop_stepping (ecs);
3356 return;
3357 }
3358 }
3359
3360 /* If we are skipping through a shell, or through shared library
3361 loading that we aren't interested in, resume the program. If
3362 we're running the program normally, also resume. But stop if
3363 we're attaching or setting up a remote connection. */
3364 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3365 {
3366 /* Loading of shared libraries might have changed breakpoint
3367 addresses. Make sure new breakpoints are inserted. */
3368 if (stop_soon == NO_STOP_QUIETLY
3369 && !breakpoints_always_inserted_mode ())
3370 insert_breakpoints ();
3371 resume (0, TARGET_SIGNAL_0);
3372 prepare_to_wait (ecs);
3373 return;
3374 }
3375
3376 break;
3377
3378 case TARGET_WAITKIND_SPURIOUS:
3379 if (debug_infrun)
3380 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3381 resume (0, TARGET_SIGNAL_0);
3382 prepare_to_wait (ecs);
3383 return;
3384
3385 case TARGET_WAITKIND_EXITED:
3386 if (debug_infrun)
3387 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3388 inferior_ptid = ecs->ptid;
3389 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3390 set_current_program_space (current_inferior ()->pspace);
3391 handle_vfork_child_exec_or_exit (0);
3392 target_terminal_ours (); /* Must do this before mourn anyway. */
3393 print_exited_reason (ecs->ws.value.integer);
3394
3395 /* Record the exit code in the convenience variable $_exitcode, so
3396 that the user can inspect this again later. */
3397 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3398 (LONGEST) ecs->ws.value.integer);
3399
3400 /* Also record this in the inferior itself. */
3401 current_inferior ()->has_exit_code = 1;
3402 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3403
3404 gdb_flush (gdb_stdout);
3405 target_mourn_inferior ();
3406 singlestep_breakpoints_inserted_p = 0;
3407 cancel_single_step_breakpoints ();
3408 stop_print_frame = 0;
3409 stop_stepping (ecs);
3410 return;
3411
3412 case TARGET_WAITKIND_SIGNALLED:
3413 if (debug_infrun)
3414 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3415 inferior_ptid = ecs->ptid;
3416 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3417 set_current_program_space (current_inferior ()->pspace);
3418 handle_vfork_child_exec_or_exit (0);
3419 stop_print_frame = 0;
3420 target_terminal_ours (); /* Must do this before mourn anyway. */
3421
3422 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3423 reach here unless the inferior is dead. However, for years
3424 target_kill() was called here, which hints that fatal signals aren't
3425 really fatal on some systems. If that's true, then some changes
3426 may be needed. */
3427 target_mourn_inferior ();
3428
3429 print_signal_exited_reason (ecs->ws.value.sig);
3430 singlestep_breakpoints_inserted_p = 0;
3431 cancel_single_step_breakpoints ();
3432 stop_stepping (ecs);
3433 return;
3434
3435 /* The following are the only cases in which we keep going;
3436 the above cases end in a continue or goto. */
3437 case TARGET_WAITKIND_FORKED:
3438 case TARGET_WAITKIND_VFORKED:
3439 if (debug_infrun)
3440 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3441
3442 /* Check whether the inferior is displaced stepping. */
3443 {
3444 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3445 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3446 struct displaced_step_inferior_state *displaced
3447 = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));
3448
3449 /* If displaced stepping is in use, and thread ecs->ptid is the
3450 one performing the displaced step. */
3451 if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
3452 {
3453 struct inferior *parent_inf
3454 = find_inferior_pid (ptid_get_pid (ecs->ptid));
3455 struct regcache *child_regcache;
3456 CORE_ADDR parent_pc;
3457
3458 /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
3459 indicating that the displaced stepping of the syscall instruction
3460 has been done. Perform cleanup for parent process here. Note
3461 that this operation also cleans up the child process for vfork,
3462 because their pages are shared. */
3463 displaced_step_fixup (ecs->ptid, TARGET_SIGNAL_TRAP);
3464
3465 if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
3466 {
3467 /* Restore scratch pad for child process. */
3468 displaced_step_restore (displaced, ecs->ws.value.related_pid);
3469 }
3470
3471 /* Since the vfork/fork syscall instruction was executed in the scratchpad,
3472 the child's PC is also within the scratchpad. Set the child's PC
3473 to the parent's PC value, which has already been fixed up.
3474 FIXME: we use the parent's aspace here, although we're touching
3475 the child, because the child hasn't been added to the inferior
3476 list yet at this point. */
3477
3478 child_regcache
3479 = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
3480 gdbarch,
3481 parent_inf->aspace);
3482 /* Read PC value of parent process. */
3483 parent_pc = regcache_read_pc (regcache);
3484
3485 if (debug_displaced)
3486 fprintf_unfiltered (gdb_stdlog,
3487 "displaced: write child pc from %s to %s\n",
3488 paddress (gdbarch,
3489 regcache_read_pc (child_regcache)),
3490 paddress (gdbarch, parent_pc));
3491
3492 regcache_write_pc (child_regcache, parent_pc);
3493 }
3494 }
3495
3496 if (!ptid_equal (ecs->ptid, inferior_ptid))
3497 {
3498 context_switch (ecs->ptid);
3499 reinit_frame_cache ();
3500 }
3501
3502 /* Immediately detach breakpoints from the child before there's
3503 any chance of letting the user delete breakpoints from the
3504 breakpoint lists. If we don't do this early, it's easy to
3505 leave left-over traps in the child, viz: "break foo; catch
3506 fork; c; <fork>; del; c; <child calls foo>". We only follow
3507 the fork on the last `continue', and by that time the
3508 breakpoint at "foo" is long gone from the breakpoint table.
3509 If we vforked, then we don't need to unpatch here, since both
3510 parent and child are sharing the same memory pages; we'll
3511 need to unpatch at follow/detach time instead to be certain
3512 that new breakpoints added between catchpoint hit time and
3513 vfork follow are detached. */
3514 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3515 {
3516 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3517
3518 /* This won't actually modify the breakpoint list, but will
3519 physically remove the breakpoints from the child. */
3520 detach_breakpoints (child_pid);
3521 }
3522
3523 if (singlestep_breakpoints_inserted_p)
3524 {
3525 /* Pull the single step breakpoints out of the target. */
3526 remove_single_step_breakpoints ();
3527 singlestep_breakpoints_inserted_p = 0;
3528 }
3529
3530 /* In case the event is caught by a catchpoint, remember that
3531 the event is to be followed at the next resume of the thread,
3532 and not immediately. */
3533 ecs->event_thread->pending_follow = ecs->ws;
3534
3535 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3536
3537 ecs->event_thread->control.stop_bpstat
3538 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3539 stop_pc, ecs->ptid, &ecs->ws);
3540
3541 /* Note that we're interested in knowing whether the bpstat actually
3542 causes a stop, not just if it may explain the signal.
3543 Software watchpoints, for example, always appear in the
3544 bpstat. */
3545 ecs->random_signal
3546 = !bpstat_causes_stop (ecs->event_thread->control.stop_bpstat);
3547
3548 /* If no catchpoint triggered for this, then keep going. */
3549 if (ecs->random_signal)
3550 {
3551 ptid_t parent;
3552 ptid_t child;
3553 int should_resume;
3554 int follow_child
3555 = (follow_fork_mode_string == follow_fork_mode_child);
3556
3557 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3558
3559 should_resume = follow_fork ();
3560
3561 parent = ecs->ptid;
3562 child = ecs->ws.value.related_pid;
3563
3564 /* In non-stop mode, also resume the other branch. */
3565 if (non_stop && !detach_fork)
3566 {
3567 if (follow_child)
3568 switch_to_thread (parent);
3569 else
3570 switch_to_thread (child);
3571
3572 ecs->event_thread = inferior_thread ();
3573 ecs->ptid = inferior_ptid;
3574 keep_going (ecs);
3575 }
3576
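/* Now switch to whichever thread GDB is supposed to follow (parent
or child, per "set follow-fork-mode"), and resume or stop it
depending on what follow_fork decided. */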
3577 if (follow_child)
3578 switch_to_thread (child);
3579 else
3580 switch_to_thread (parent);
3581
3582 ecs->event_thread = inferior_thread ();
3583 ecs->ptid = inferior_ptid;
3584
3585 if (should_resume)
3586 keep_going (ecs);
3587 else
3588 stop_stepping (ecs);
3589 return;
3590 }
3591 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3592 goto process_event_stop_test;
3593
3594 case TARGET_WAITKIND_VFORK_DONE:
3595 /* Done with the shared memory region. Re-insert breakpoints in
3596 the parent, and keep going. */
3597
3598 if (debug_infrun)
3599 fprintf_unfiltered (gdb_stdlog,
3600 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3601
3602 if (!ptid_equal (ecs->ptid, inferior_ptid))
3603 context_switch (ecs->ptid);
3604
3605 current_inferior ()->waiting_for_vfork_done = 0;
3606 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3607 /* This also takes care of reinserting breakpoints in the
3608 previously locked inferior. */
3609 keep_going (ecs);
3610 return;
3611
3612 case TARGET_WAITKIND_EXECD:
3613 if (debug_infrun)
3614 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3615
3616 if (!ptid_equal (ecs->ptid, inferior_ptid))
3617 {
3618 context_switch (ecs->ptid);
3619 reinit_frame_cache ();
3620 }
3621
3622 singlestep_breakpoints_inserted_p = 0;
3623 cancel_single_step_breakpoints ();
3624
3625 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3626
3627 /* Do whatever is necessary to the parent branch of the vfork. */
3628 handle_vfork_child_exec_or_exit (1);
3629
3630 /* This causes the eventpoints and symbol table to be reset.
3631 Must do this now, before trying to determine whether to
3632 stop. */
3633 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3634
3635 ecs->event_thread->control.stop_bpstat
3636 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3637 stop_pc, ecs->ptid, &ecs->ws);
3638 ecs->random_signal
3639 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3640
3641 /* Note that this may be referenced from inside
3642 bpstat_stop_status above, through inferior_has_execd. */
3643 xfree (ecs->ws.value.execd_pathname);
3644 ecs->ws.value.execd_pathname = NULL;
3645
3646 /* If no catchpoint triggered for this, then keep going. */
3647 if (ecs->random_signal)
3648 {
3649 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3650 keep_going (ecs);
3651 return;
3652 }
3653 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3654 goto process_event_stop_test;
3655
3656 /* Be careful not to try to gather much state about a thread
3657 that's in a syscall. It's frequently a losing proposition. */
3658 case TARGET_WAITKIND_SYSCALL_ENTRY:
3659 if (debug_infrun)
3660 fprintf_unfiltered (gdb_stdlog,
3661 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3662 /* Get the current syscall number. */
3663 if (handle_syscall_event (ecs) != 0)
3664 return;
3665 goto process_event_stop_test;
3666
3667 /* Before examining the threads further, step this thread to
3668 get it entirely out of the syscall. (We get notice of the
3669 event when the thread is just on the verge of exiting a
3670 syscall. Stepping one instruction seems to get it back
3671 into user code.) */
3672 case TARGET_WAITKIND_SYSCALL_RETURN:
3673 if (debug_infrun)
3674 fprintf_unfiltered (gdb_stdlog,
3675 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3676 if (handle_syscall_event (ecs) != 0)
3677 return;
3678 goto process_event_stop_test;
3679
3680 case TARGET_WAITKIND_STOPPED:
3681 if (debug_infrun)
3682 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3683 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3684 break;
3685
3686 case TARGET_WAITKIND_NO_HISTORY:
3687 if (debug_infrun)
3688 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
3689 /* Reverse execution: target ran out of history info. */
3690 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3691 print_no_history_reason ();
3692 stop_stepping (ecs);
3693 return;
3694 }
3695
3696 if (ecs->new_thread_event)
3697 {
3698 if (non_stop)
3699 /* Non-stop assumes that the target handles adding new threads
3700 to the thread list. */
3701 internal_error (__FILE__, __LINE__,
3702 "targets should add new threads to the thread "
3703 "list themselves in non-stop mode.");
3704
3705 /* We may want to consider not doing a resume here in order to
3706 give the user a chance to play with the new thread. It might
3707 be good to make that a user-settable option. */
3708
3709 /* At this point, all threads are stopped (happens automatically
3710 in either the OS or the native code). Therefore we need to
3711 continue all threads in order to make progress. */
3712
3713 if (!ptid_equal (ecs->ptid, inferior_ptid))
3714 context_switch (ecs->ptid);
3715 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3716 prepare_to_wait (ecs);
3717 return;
3718 }
3719
3720 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3721 {
3722 /* Do we need to clean up the state of a thread that has
3723 completed a displaced single-step? (Doing so usually affects
3724 the PC, so do it here, before we set stop_pc.) */
3725 displaced_step_fixup (ecs->ptid,
3726 ecs->event_thread->suspend.stop_signal);
3727
3728 /* If we either finished a single-step or hit a breakpoint, but
3729 the user wanted this thread to be stopped, pretend we got a
3730 SIG0 (generic unsignaled stop). */
3731
3732 if (ecs->event_thread->stop_requested
3733 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3734 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3735 }
3736
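/* Record the event thread's PC; most of the decisions below key off
of STOP_PC. */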
3737 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3738
3739 if (debug_infrun)
3740 {
3741 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3742 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3743 struct cleanup *old_chain = save_inferior_ptid ();
3744
3745 inferior_ptid = ecs->ptid;
3746
3747 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3748 paddress (gdbarch, stop_pc));
3749 if (target_stopped_by_watchpoint ())
3750 {
3751 CORE_ADDR addr;
3752
3753 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3754
3755 if (target_stopped_data_address (&current_target, &addr))
3756 fprintf_unfiltered (gdb_stdlog,
3757 "infrun: stopped data address = %s\n",
3758 paddress (gdbarch, addr));
3759 else
3760 fprintf_unfiltered (gdb_stdlog,
3761 "infrun: (no data address available)\n");
3762 }
3763
3764 do_cleanups (old_chain);
3765 }
3766
3767 if (stepping_past_singlestep_breakpoint)
3768 {
3769 gdb_assert (singlestep_breakpoints_inserted_p);
3770 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3771 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3772
3773 stepping_past_singlestep_breakpoint = 0;
3774
3775 /* We've either finished single-stepping past the single-step
3776 breakpoint, or stopped for some other reason. It would be nice if
3777 we could tell, but we can't reliably. */
3778 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3779 {
3780 if (debug_infrun)
3781 fprintf_unfiltered (gdb_stdlog,
3782 "infrun: stepping_past_"
3783 "singlestep_breakpoint\n");
3784 /* Pull the single step breakpoints out of the target. */
3785 remove_single_step_breakpoints ();
3786 singlestep_breakpoints_inserted_p = 0;
3787
3788 ecs->random_signal = 0;
3789 ecs->event_thread->control.trap_expected = 0;
3790
3791 context_switch (saved_singlestep_ptid);
3792 if (deprecated_context_hook)
3793 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3794
3795 resume (1, TARGET_SIGNAL_0);
3796 prepare_to_wait (ecs);
3797 return;
3798 }
3799 }
3800
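/* DEFERRED_STEP_PTID is set when another thread's step had to be
postponed so that this thread could first be stepped past a
breakpoint. If that step-past has now finished, switch back and
resume the deferred step. */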
3801 if (!ptid_equal (deferred_step_ptid, null_ptid))
3802 {
3803 /* In non-stop mode, there's never a deferred_step_ptid set. */
3804 gdb_assert (!non_stop);
3805
3806 /* If we stopped for some other reason than single-stepping, ignore
3807 the fact that we were supposed to switch back. */
3808 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3809 {
3810 if (debug_infrun)
3811 fprintf_unfiltered (gdb_stdlog,
3812 "infrun: handling deferred step\n");
3813
3814 /* Pull the single step breakpoints out of the target. */
3815 if (singlestep_breakpoints_inserted_p)
3816 {
3817 remove_single_step_breakpoints ();
3818 singlestep_breakpoints_inserted_p = 0;
3819 }
3820
3821 ecs->event_thread->control.trap_expected = 0;
3822
3823 /* Note: We do not call context_switch at this point, as the
3824 context is already set up for stepping the original thread. */
3825 switch_to_thread (deferred_step_ptid);
3826 deferred_step_ptid = null_ptid;
3827 /* Suppress spurious "Switching to ..." message. */
3828 previous_inferior_ptid = inferior_ptid;
3829
3830 resume (1, TARGET_SIGNAL_0);
3831 prepare_to_wait (ecs);
3832 return;
3833 }
3834
3835 deferred_step_ptid = null_ptid;
3836 }
3837
3838 /* See if a thread hit a thread-specific breakpoint that was meant for
3839 another thread. If so, then step that thread past the breakpoint,
3840 and continue it. */
3841
3842 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3843 {
3844 int thread_hop_needed = 0;
3845 struct address_space *aspace =
3846 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3847
3848 /* Check if a regular breakpoint has been hit before checking
3849 for a potential single step breakpoint. Otherwise, GDB will
3850 not see this breakpoint hit when stepping onto breakpoints. */
3851 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3852 {
3853 ecs->random_signal = 0;
3854 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3855 thread_hop_needed = 1;
3856 }
3857 else if (singlestep_breakpoints_inserted_p)
3858 {
3859 /* We have not context switched yet, so this should be true
3860 no matter which thread hit the singlestep breakpoint. */
3861 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3862 if (debug_infrun)
3863 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3864 "trap for %s\n",
3865 target_pid_to_str (ecs->ptid));
3866
3867 ecs->random_signal = 0;
3868 /* The call to in_thread_list is necessary because PTIDs sometimes
3869 change when we go from single-threaded to multi-threaded. If
3870 the singlestep_ptid is still in the list, assume that it is
3871 really different from ecs->ptid. */
3872 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3873 && in_thread_list (singlestep_ptid))
3874 {
3875 /* If the PC of the thread we were trying to single-step
3876 has changed, discard this event (which we were going
3877 to ignore anyway), and pretend we saw that thread
3878 trap. This prevents us continuously moving the
3879 single-step breakpoint forward, one instruction at a
3880 time. If the PC has changed, then the thread we were
3881 trying to single-step has trapped or been signalled,
3882 but the event has not been reported to GDB yet.
3883
3884 There might be some cases where this loses signal
3885 information, if a signal has arrived at exactly the
3886 same time that the PC changed, but this is the best
3887 we can do with the information available. Perhaps we
3888 should arrange to report all events for all threads
3889 when they stop, or to re-poll the remote looking for
3890 this particular thread (i.e. temporarily enable
3891 schedlock). */
3892
3893 CORE_ADDR new_singlestep_pc
3894 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3895
3896 if (new_singlestep_pc != singlestep_pc)
3897 {
3898 enum target_signal stop_signal;
3899
3900 if (debug_infrun)
3901 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3902 " but expected thread advanced also\n");
3903
3904 /* The current context still belongs to
3905 singlestep_ptid. Don't swap here, since that's
3906 the context we want to use. Just fudge our
3907 state and continue. */
3908 stop_signal = ecs->event_thread->suspend.stop_signal;
3909 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3910 ecs->ptid = singlestep_ptid;
3911 ecs->event_thread = find_thread_ptid (ecs->ptid);
3912 ecs->event_thread->suspend.stop_signal = stop_signal;
3913 stop_pc = new_singlestep_pc;
3914 }
3915 else
3916 {
3917 if (debug_infrun)
3918 fprintf_unfiltered (gdb_stdlog,
3919 "infrun: unexpected thread\n");
3920
3921 thread_hop_needed = 1;
3922 stepping_past_singlestep_breakpoint = 1;
3923 saved_singlestep_ptid = singlestep_ptid;
3924 }
3925 }
3926 }
3927
3928 if (thread_hop_needed)
3929 {
3930 struct regcache *thread_regcache;
3931 int remove_status = 0;
3932
3933 if (debug_infrun)
3934 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3935
3936 /* Switch context before touching inferior memory; the
3937 previous thread may have exited. */
3938 if (!ptid_equal (inferior_ptid, ecs->ptid))
3939 context_switch (ecs->ptid);
3940
3941 /* Saw a breakpoint, but it was hit by the wrong thread.
3942 Just continue. */
3943
3944 if (singlestep_breakpoints_inserted_p)
3945 {
3946 /* Pull the single step breakpoints out of the target. */
3947 remove_single_step_breakpoints ();
3948 singlestep_breakpoints_inserted_p = 0;
3949 }
3950
3951 /* If the arch can displace step, don't remove the
3952 breakpoints. */
3953 thread_regcache = get_thread_regcache (ecs->ptid);
3954 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3955 remove_status = remove_breakpoints ();
3956
3957 /* Did we fail to remove breakpoints? If so, try
3958 to set the PC past the bp. (There's at least
3959 one situation in which we can fail to remove
3960 the bp's: On HP-UX's that use ttrace, we can't
3961 change the address space of a vforking child
3962 process until the child exits (well, okay, not
3963 then either :-) or execs. */
3964 if (remove_status != 0)
3965 error (_("Cannot step over breakpoint hit in wrong thread"));
3966 else
3967 { /* Single step */
3968 if (!non_stop)
3969 {
3970 /* Only need to require the next event from this
3971 thread in all-stop mode. */
3972 waiton_ptid = ecs->ptid;
3973 infwait_state = infwait_thread_hop_state;
3974 }
3975
3976 ecs->event_thread->stepping_over_breakpoint = 1;
3977 keep_going (ecs);
3978 return;
3979 }
3980 }
3981 else if (singlestep_breakpoints_inserted_p)
3982 {
3983 ecs->random_signal = 0;
3984 }
3985 }
3986 else
3987 ecs->random_signal = 1;
3988
3989 /* See if something interesting happened to the non-current thread. If
3990 so, then switch to that thread. */
3991 if (!ptid_equal (ecs->ptid, inferior_ptid))
3992 {
3993 if (debug_infrun)
3994 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3995
3996 context_switch (ecs->ptid);
3997
3998 if (deprecated_context_hook)
3999 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4000 }
4001
4002 /* At this point, get hold of the now-current thread's frame. */
4003 frame = get_current_frame ();
4004 gdbarch = get_frame_arch (frame);
4005
4006 if (singlestep_breakpoints_inserted_p)
4007 {
4008 /* Pull the single step breakpoints out of the target. */
4009 remove_single_step_breakpoints ();
4010 singlestep_breakpoints_inserted_p = 0;
4011 }
4012
4013 if (stepped_after_stopped_by_watchpoint)
4014 stopped_by_watchpoint = 0;
4015 else
4016 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4017
4018 /* If necessary, step over this watchpoint. We'll be back to display
4019 it in a moment. */
4020 if (stopped_by_watchpoint
4021 && (target_have_steppable_watchpoint
4022 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4023 {
4024 /* At this point, we are stopped at an instruction which has
4025 attempted to write to a piece of memory under control of
4026 a watchpoint. The instruction hasn't actually executed
4027 yet. If we were to evaluate the watchpoint expression
4028 now, we would get the old value, and therefore no change
4029 would seem to have occurred.
4030
4031 In order to make watchpoints work `right', we really need
4032 to complete the memory write, and then evaluate the
4033 watchpoint expression. We do this by single-stepping the
4034 target.
4035
4036 It may not be necessary to disable the watchpoint to stop over
4037 it. For example, the PA can (with some kernel cooperation)
4038 single step over a watchpoint without disabling the watchpoint.
4039
4040 It is far more common to need to disable a watchpoint to step
4041 the inferior over it. If we have non-steppable watchpoints,
4042 we must disable the current watchpoint; it's simplest to
4043 disable all watchpoints and breakpoints. */
4044 int hw_step = 1;
4045
4046 if (!target_have_steppable_watchpoint)
4047 {
4048 remove_breakpoints ();
4049 /* See the comment in resume for why we need to stop bypassing
4050 signals while breakpoints have been removed. */
4051 target_pass_signals (0, NULL);
4052 }
4053 /* Single step; HW_STEP is 0 if software single-step breakpoints were used instead. */
4054 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
4055 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
4056 waiton_ptid = ecs->ptid;
4057 if (target_have_steppable_watchpoint)
4058 infwait_state = infwait_step_watch_state;
4059 else
4060 infwait_state = infwait_nonstep_watch_state;
4061 prepare_to_wait (ecs);
4062 return;
4063 }
4064
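/* Reset per-event state before analyzing why we stopped. */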
4065 clear_stop_func (ecs);
4066 ecs->event_thread->stepping_over_breakpoint = 0;
4067 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4068 ecs->event_thread->control.stop_step = 0;
4069 stop_print_frame = 1;
4070 ecs->random_signal = 0;
4071 stopped_by_random_signal = 0;
4072
4073 /* Hide inlined functions starting here, unless we just performed stepi or
4074 nexti. After stepi and nexti, always show the innermost frame (not any
4075 inline function call sites). */
4076 if (ecs->event_thread->control.step_range_end != 1)
4077 {
4078 struct address_space *aspace =
4079 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4080
4081 /* skip_inline_frames is expensive, so we avoid it if we can
4082 determine that the address is one where functions cannot have
4083 been inlined. This improves performance with inferiors that
4084 load a lot of shared libraries, because the solib event
4085 breakpoint is defined as the address of a function (i.e. not
4086 inline). Note that we have to check the previous PC as well
4087 as the current one to catch cases when we have just
4088 single-stepped off a breakpoint prior to reinstating it.
4089 Note that we're assuming that the code we single-step to is
4090 not inline, but that's not definitive: there's nothing
4091 preventing the event breakpoint function from containing
4092 inlined code, and the single-step ending up there. If the
4093 user had set a breakpoint on that inlined code, the missing
4094 skip_inline_frames call would break things. Fortunately
4095 that's an extremely unlikely scenario. */
4096 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4097 && !(ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4098 && ecs->event_thread->control.trap_expected
4099 && pc_at_non_inline_function (aspace,
4100 ecs->event_thread->prev_pc,
4101 &ecs->ws)))
4102 skip_inline_frames (ecs->ptid);
4103 }
4104
4105 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4106 && ecs->event_thread->control.trap_expected
4107 && gdbarch_single_step_through_delay_p (gdbarch)
4108 && currently_stepping (ecs->event_thread))
4109 {
4110 /* We're trying to step off a breakpoint. Turns out that we're
4111 also on an instruction that needs to be stepped multiple
4112 times before it has fully executed. E.g., architectures
4113 with a delay slot. It needs to be stepped twice, once for
4114 the instruction and once for the delay slot. */
4115 int step_through_delay
4116 = gdbarch_single_step_through_delay (gdbarch, frame);
4117
4118 if (debug_infrun && step_through_delay)
4119 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4120 if (ecs->event_thread->control.step_range_end == 0
4121 && step_through_delay)
4122 {
4123 /* The user issued a continue when stopped at a breakpoint.
4124 Set up for another trap and get out of here. */
4125 ecs->event_thread->stepping_over_breakpoint = 1;
4126 keep_going (ecs);
4127 return;
4128 }
4129 else if (step_through_delay)
4130 {
4131 /* The user issued a step when stopped at a breakpoint.
4132 Maybe we should stop, maybe we should not - the delay
4133 slot *might* correspond to a line of source. In any
4134 case, don't decide that here, just set
4135 ecs->stepping_over_breakpoint, making sure we
4136 single-step again before breakpoints are re-inserted. */
4137 ecs->event_thread->stepping_over_breakpoint = 1;
4138 }
4139 }
4140
4141 /* Look at the cause of the stop, and decide what to do.
4142 The alternatives are:
4143 1) stop_stepping and return; to really stop and return to the debugger,
4144 2) keep_going and return to start up again
4145 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
4146 3) set ecs->random_signal to 1, and the decision between 1 and 2
4147 will be made according to the signal handling tables. */
4148
4149 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4150 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
4151 || stop_soon == STOP_QUIETLY_REMOTE)
4152 {
4153 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4154 && stop_after_trap)
4155 {
4156 if (debug_infrun)
4157 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4158 stop_print_frame = 0;
4159 stop_stepping (ecs);
4160 return;
4161 }
4162
4163 /* This originates from start_remote(), start_inferior() and
4164 the shared library hook functions. */
4165 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4166 {
4167 if (debug_infrun)
4168 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4169 stop_stepping (ecs);
4170 return;
4171 }
4172
4173 /* This originates from attach_command(). We need to overwrite
4174 the stop_signal here, because some kernels don't ignore a
4175 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4176 See more comments in inferior.h. On the other hand, if we
4177 get a non-SIGSTOP, report it to the user - assume the backend
4178 will handle the SIGSTOP if it should show up later.
4179
4180 Also consider that the attach is complete when we see a
4181 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4182 target extended-remote report it instead of a SIGSTOP
4183 (e.g. gdbserver). We already rely on SIGTRAP being our
4184 signal, so this is no exception.
4185
4186 Also consider that the attach is complete when we see a
4187 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4188 the target to stop all threads of the inferior, in case the
4189 low level attach operation doesn't stop them implicitly. If
4190 they weren't stopped implicitly, then the stub will report a
4191 TARGET_SIGNAL_0, meaning: stopped for no particular reason
4192 other than GDB's request. */
4193 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4194 && (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_STOP
4195 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4196 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_0))
4197 {
4198 stop_stepping (ecs);
4199 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4200 return;
4201 }
4202
4203 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4204 handles this event. */
4205 ecs->event_thread->control.stop_bpstat
4206 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4207 stop_pc, ecs->ptid, &ecs->ws);
4208
4209 /* The following is needed in case a breakpoint condition
4210 called a function. */
4211 stop_print_frame = 1;
4212
4213 /* This is where we handle "moribund" watchpoints. Unlike
4214 software breakpoints traps, hardware watchpoint traps are
4215 always distinguishable from random traps. If no high-level
4216 watchpoint is associated with the reported stop data address
4217 anymore, then the bpstat does not explain the signal ---
4218 simply make sure to ignore it if `stopped_by_watchpoint' is
4219 set. */
4220
4221 if (debug_infrun
4222 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4223 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4224 && stopped_by_watchpoint)
4225 fprintf_unfiltered (gdb_stdlog,
4226 "infrun: no user watchpoint explains "
4227 "watchpoint SIGTRAP, ignoring\n");
4228
4229 /* NOTE: cagney/2003-03-29: These two checks for a random signal
4230 at one stage in the past included checks for an inferior
4231 function call's call dummy's return breakpoint. The original
4232 comment, that went with the test, read:
4233
4234 ``End of a stack dummy. Some systems (e.g. Sony news) give
4235 another signal besides SIGTRAP, so check here as well as
4236 above.''
4237
4238 If someone ever tries to get call dummies on a
4239 non-executable stack to work (where the target would stop
4240 with something like a SIGSEGV), then those tests might need
4241 to be re-instated. Given, however, that the tests were only
4242 enabled when momentary breakpoints were not being used, I
4243 suspect that it won't be the case.
4244
4245 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4246 be necessary for call dummies on a non-executable stack on
4247 SPARC. */
4248
4249 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
4250 ecs->random_signal
4251 = !(bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4252 || stopped_by_watchpoint
4253 || ecs->event_thread->control.trap_expected
4254 || (ecs->event_thread->control.step_range_end
4255 && (ecs->event_thread->control.step_resume_breakpoint
4256 == NULL)));
4257 else
4258 {
4259 ecs->random_signal = !bpstat_explains_signal
4260 (ecs->event_thread->control.stop_bpstat);
4261 if (!ecs->random_signal)
4262 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
4263 }
4264 }
4265
4266 /* When we reach this point, we've pretty much decided
4267 that the reason for stopping must've been a random
4268 (unexpected) signal. */
4269
4270 else
4271 ecs->random_signal = 1;
4272
4273 process_event_stop_test:
4274
4275 /* Re-fetch current thread's frame in case we did a
4276 "goto process_event_stop_test" above. */
4277 frame = get_current_frame ();
4278 gdbarch = get_frame_arch (frame);
4279
4280 /* For the program's own signals, act according to
4281 the signal handling tables. */
4282
4283 if (ecs->random_signal)
4284 {
4285 /* Signal not for debugging purposes. */
4286 int printed = 0;
4287 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4288
4289 if (debug_infrun)
4290 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
4291 ecs->event_thread->suspend.stop_signal);
4292
4293 stopped_by_random_signal = 1;
4294
4295 if (signal_print[ecs->event_thread->suspend.stop_signal])
4296 {
4297 printed = 1;
4298 target_terminal_ours_for_output ();
4299 print_signal_received_reason
4300 (ecs->event_thread->suspend.stop_signal);
4301 }
4302 /* Always stop on signals if we're either just gaining control
4303 of the program, or the user explicitly requested this thread
4304 to remain stopped. */
4305 if (stop_soon != NO_STOP_QUIETLY
4306 || ecs->event_thread->stop_requested
4307 || (!inf->detaching
4308 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4309 {
4310 stop_stepping (ecs);
4311 return;
4312 }
4313 /* If not going to stop, give terminal back
4314 if we took it away. */
4315 else if (printed)
4316 target_terminal_inferior ();
4317
4318 /* Clear the signal if it should not be passed. */
4319 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4320 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4321
4322 if (ecs->event_thread->prev_pc == stop_pc
4323 && ecs->event_thread->control.trap_expected
4324 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4325 {
4326 /* We were just starting a new sequence, attempting to
4327 single-step off of a breakpoint and expecting a SIGTRAP.
4328 Instead this signal arrives. This signal will take us out
4329 of the stepping range so GDB needs to remember to, when
4330 the signal handler returns, resume stepping off that
4331 breakpoint. */
4332 /* To simplify things, "continue" is forced to use the same
4333 code paths as single-step - set a breakpoint at the
4334 signal return address and then, once hit, step off that
4335 breakpoint. */
4336 if (debug_infrun)
4337 fprintf_unfiltered (gdb_stdlog,
4338 "infrun: signal arrived while stepping over "
4339 "breakpoint\n");
4340
4341 insert_hp_step_resume_breakpoint_at_frame (frame);
4342 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4343 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4344 ecs->event_thread->control.trap_expected = 0;
4345 keep_going (ecs);
4346 return;
4347 }
4348
4349 if (ecs->event_thread->control.step_range_end != 0
4350 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_0
4351 && (ecs->event_thread->control.step_range_start <= stop_pc
4352 && stop_pc < ecs->event_thread->control.step_range_end)
4353 && frame_id_eq (get_stack_frame_id (frame),
4354 ecs->event_thread->control.step_stack_frame_id)
4355 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4356 {
4357 /* The inferior is about to take a signal that will take it
4358 out of the single step range. Set a breakpoint at the
4359 current PC (which is presumably where the signal handler
4360 will eventually return) and then allow the inferior to
4361 run free.
4362
4363 Note that this is only needed for a signal delivered
4364 while in the single-step range. Nested signals aren't a
4365 problem as they eventually all return. */
4366 if (debug_infrun)
4367 fprintf_unfiltered (gdb_stdlog,
4368 "infrun: signal may take us out of "
4369 "single-step range\n");
4370
4371 insert_hp_step_resume_breakpoint_at_frame (frame);
4372 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4373 ecs->event_thread->control.trap_expected = 0;
4374 keep_going (ecs);
4375 return;
4376 }
4377
4378 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4379 when either there's a nested signal, or when there's a
4380 pending signal enabled just as the signal handler returns
4381 (leaving the inferior at the step-resume-breakpoint without
4382 actually executing it). Either way continue until the
4383 breakpoint is really hit. */
4384 keep_going (ecs);
4385 return;
4386 }
4387
4388 /* Handle cases caused by hitting a breakpoint. */
4389 {
4390 CORE_ADDR jmp_buf_pc;
4391 struct bpstat_what what;
4392
4393 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4394
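/* If we stopped at a call dummy breakpoint (an inferior function
call returning), record that fact so the rest of the stop machinery
can treat the dummy frame specially. */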
4395 if (what.call_dummy)
4396 {
4397 stop_stack_dummy = what.call_dummy;
4398 }
4399
4400 /* If we hit an internal event that triggers symbol changes, the
4401 current frame will be invalidated within bpstat_what (e.g., if
4402 we hit an internal solib event). Re-fetch it. */
4403 frame = get_current_frame ();
4404 gdbarch = get_frame_arch (frame);
4405
4406 switch (what.main_action)
4407 {
4408 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4409 /* If we hit the breakpoint at longjmp while stepping, we
4410 install a momentary breakpoint at the target of the
4411 jmp_buf. */
4412
4413 if (debug_infrun)
4414 fprintf_unfiltered (gdb_stdlog,
4415 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4416
4417 ecs->event_thread->stepping_over_breakpoint = 1;
4418
4419 if (what.is_longjmp)
4420 {
4421 if (!gdbarch_get_longjmp_target_p (gdbarch)
4422 || !gdbarch_get_longjmp_target (gdbarch,
4423 frame, &jmp_buf_pc))
4424 {
4425 if (debug_infrun)
4426 fprintf_unfiltered (gdb_stdlog,
4427 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4428 "(!gdbarch_get_longjmp_target)\n");
4429 keep_going (ecs);
4430 return;
4431 }
4432
4433 /* We're going to replace the current step-resume breakpoint
4434 with a longjmp-resume breakpoint. */
4435 delete_step_resume_breakpoint (ecs->event_thread);
4436
4437 /* Insert a breakpoint at resume address. */
4438 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4439 }
4440 else
4441 {
4442 struct symbol *func = get_frame_function (frame);
4443
4444 if (func)
4445 check_exception_resume (ecs, frame, func);
4446 }
4447 keep_going (ecs);
4448 return;
4449
4450 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4451 if (debug_infrun)
4452 fprintf_unfiltered (gdb_stdlog,
4453 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4454
4455 if (what.is_longjmp)
4456 {
4457 gdb_assert (ecs->event_thread->control.step_resume_breakpoint
4458 != NULL);
4459 delete_step_resume_breakpoint (ecs->event_thread);
4460 }
4461 else
4462 {
4463 /* There are several cases to consider.
4464
4465 1. The initiating frame no longer exists. In this case
4466 we must stop, because the exception has gone too far.
4467
4468 2. The initiating frame exists, and is the same as the
4469 current frame. We stop, because the exception has been
4470 caught.
4471
4472 3. The initiating frame exists and is different from
4473 the current frame. This means the exception has been
4474 caught beneath the initiating frame, so keep going. */
4475 struct frame_info *init_frame
4476 = frame_find_by_id (ecs->event_thread->initiating_frame);
4477
4478 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4479 != NULL);
4480 delete_exception_resume_breakpoint (ecs->event_thread);
4481
4482 if (init_frame)
4483 {
4484 struct frame_id current_id
4485 = get_frame_id (get_current_frame ());
4486 if (frame_id_eq (current_id,
4487 ecs->event_thread->initiating_frame))
4488 {
4489 /* Case 2. Fall through. */
4490 }
4491 else
4492 {
4493 /* Case 3. */
4494 keep_going (ecs);
4495 return;
4496 }
4497 }
4498
4499 /* For Cases 1 and 2, remove the step-resume breakpoint,
4500 if it exists. */
4501 delete_step_resume_breakpoint (ecs->event_thread);
4502 }
4503
4504 ecs->event_thread->control.stop_step = 1;
4505 print_end_stepping_range_reason ();
4506 stop_stepping (ecs);
4507 return;
4508
4509 case BPSTAT_WHAT_SINGLE:
4510 if (debug_infrun)
4511 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4512 ecs->event_thread->stepping_over_breakpoint = 1;
4513 /* Still need to check other stuff, at least the case
4514 where we are stepping and have stepped out of the step range. */
4515 break;
4516
4517 case BPSTAT_WHAT_STEP_RESUME:
4518 if (debug_infrun)
4519 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4520
4521 delete_step_resume_breakpoint (ecs->event_thread);
4522 if (ecs->event_thread->control.proceed_to_finish
4523 && execution_direction == EXEC_REVERSE)
4524 {
4525 struct thread_info *tp = ecs->event_thread;
4526
4527 /* We are finishing a function in reverse, and just hit
4528 the step-resume breakpoint at the start address of the
4529 function, and we're almost there -- just need to back
4530 up by one more single-step, which should take us back
4531 to the function call. */
4532 tp->control.step_range_start = tp->control.step_range_end = 1;
4533 keep_going (ecs);
4534 return;
4535 }
4536 fill_in_stop_func (gdbarch, ecs);
4537 if (stop_pc == ecs->stop_func_start
4538 && execution_direction == EXEC_REVERSE)
4539 {
4540 /* We are stepping over a function call in reverse, and
4541 just hit the step-resume breakpoint at the start
4542 address of the function. Go back to single-stepping,
4543 which should take us back to the function call. */
4544 ecs->event_thread->stepping_over_breakpoint = 1;
4545 keep_going (ecs);
4546 return;
4547 }
4548 break;
4549
4550 case BPSTAT_WHAT_STOP_NOISY:
4551 if (debug_infrun)
4552 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4553 stop_print_frame = 1;
4554
4555 /* We are about to nuke the step_resume_breakpoint via the
4556 cleanup chain, so no need to worry about it here. */
4557
4558 stop_stepping (ecs);
4559 return;
4560
4561 case BPSTAT_WHAT_STOP_SILENT:
4562 if (debug_infrun)
4563 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4564 stop_print_frame = 0;
4565
4566 /* We are about to nuke the step_resume_breakpoint via the
4567 cleanup chain, so no need to worry about it here. */
4568
4569 stop_stepping (ecs);
4570 return;
4571
4572 case BPSTAT_WHAT_HP_STEP_RESUME:
4573 if (debug_infrun)
4574 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4575
4576 delete_step_resume_breakpoint (ecs->event_thread);
4577 if (ecs->event_thread->step_after_step_resume_breakpoint)
4578 {
4579 /* Back when the step-resume breakpoint was inserted, we
4580 were trying to single-step off a breakpoint. Go back
4581 to doing that. */
4582 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4583 ecs->event_thread->stepping_over_breakpoint = 1;
4584 keep_going (ecs);
4585 return;
4586 }
4587 break;
4588
4589 case BPSTAT_WHAT_KEEP_CHECKING:
4590 break;
4591 }
4592 }
4593
4594 /* We come here if we hit a breakpoint but should not
4595 stop for it. Possibly we also were stepping
4596 and should stop for that. So fall through and
4597 test for stepping. But, if not stepping,
4598 do not stop. */
4599
4600 /* In all-stop mode, if we're currently stepping but have stopped in
4601 some other thread, we need to switch back to the stepped thread. */
4602 if (!non_stop)
4603 {
4604 struct thread_info *tp;
4605
4606 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4607 ecs->event_thread);
4608 if (tp)
4609 {
4610 /* However, if the current thread is blocked on some internal
4611 breakpoint, and we simply need to step over that breakpoint
4612 to get it going again, do that first. */
4613 if ((ecs->event_thread->control.trap_expected
4614 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
4615 || ecs->event_thread->stepping_over_breakpoint)
4616 {
4617 keep_going (ecs);
4618 return;
4619 }
4620
4621 /* If the stepping thread exited, then don't try to switch
4622 back and resume it, which could fail in several different
4623 ways depending on the target. Instead, just keep going.
4624
4625 We can find a stepping dead thread in the thread list in
4626 two cases:
4627
4628 - The target supports thread exit events, and when the
4629 target tries to delete the thread from the thread list,
4630 inferior_ptid pointed at the exiting thread. In such
4631 case, calling delete_thread does not really remove the
4632 thread from the list; instead, the thread is left listed,
4633 with 'exited' state.
4634
4635 - The target's debug interface does not support thread
4636 exit events, and so we have no idea whatsoever if the
4637 previously stepping thread is still alive. For that
4638 reason, we need to synchronously query the target
4639 now. */
4640 if (is_exited (tp->ptid)
4641 || !target_thread_alive (tp->ptid))
4642 {
4643 if (debug_infrun)
4644 fprintf_unfiltered (gdb_stdlog,
4645 "infrun: not switching back to "
4646 "stepped thread, it has vanished\n");
4647
4648 delete_thread (tp->ptid);
4649 keep_going (ecs);
4650 return;
4651 }
4652
4653 /* Otherwise, we no longer expect a trap in the current thread.
4654 Clear the trap_expected flag before switching back -- this is
4655 what keep_going would do as well, if we called it. */
4656 ecs->event_thread->control.trap_expected = 0;
4657
4658 if (debug_infrun)
4659 fprintf_unfiltered (gdb_stdlog,
4660 "infrun: switching back to stepped thread\n");
4661
4662 ecs->event_thread = tp;
4663 ecs->ptid = tp->ptid;
4664 context_switch (ecs->ptid);
4665 keep_going (ecs);
4666 return;
4667 }
4668 }
4669
4670 if (ecs->event_thread->control.step_resume_breakpoint)
4671 {
4672 if (debug_infrun)
4673 fprintf_unfiltered (gdb_stdlog,
4674 "infrun: step-resume breakpoint is inserted\n");
4675
4676 /* Having a step-resume breakpoint overrides anything
4677 else having to do with stepping commands until
4678 that breakpoint is reached. */
4679 keep_going (ecs);
4680 return;
4681 }
4682
4683 if (ecs->event_thread->control.step_range_end == 0)
4684 {
4685 if (debug_infrun)
4686 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4687 /* Likewise if we aren't even stepping. */
4688 keep_going (ecs);
4689 return;
4690 }
4691
4692 /* Re-fetch current thread's frame in case the code above caused
4693 the frame cache to be re-initialized, making our FRAME variable
4694 a dangling pointer. */
4695 frame = get_current_frame ();
4696 gdbarch = get_frame_arch (frame);
4697 fill_in_stop_func (gdbarch, ecs);
4698
4699 /* If stepping through a line, keep going if still within it.
4700
4701 Note that step_range_end is the address of the first instruction
4702 beyond the step range, and NOT the address of the last instruction
4703 within it!
4704
4705 Note also that during reverse execution, we may be stepping
4706 through a function epilogue and therefore must detect when
4707 the current-frame changes in the middle of a line. */
4708
4709 if (stop_pc >= ecs->event_thread->control.step_range_start
4710 && stop_pc < ecs->event_thread->control.step_range_end
4711 && (execution_direction != EXEC_REVERSE
4712 || frame_id_eq (get_frame_id (frame),
4713 ecs->event_thread->control.step_frame_id)))
4714 {
4715 if (debug_infrun)
4716 fprintf_unfiltered
4717 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4718 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4719 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4720
4721 /* When stepping backward, stop at beginning of line range
4722 (unless it's the function entry point, in which case
4723 keep going back to the call point). */
4724 if (stop_pc == ecs->event_thread->control.step_range_start
4725 && stop_pc != ecs->stop_func_start
4726 && execution_direction == EXEC_REVERSE)
4727 {
4728 ecs->event_thread->control.stop_step = 1;
4729 print_end_stepping_range_reason ();
4730 stop_stepping (ecs);
4731 }
4732 else
4733 keep_going (ecs);
4734
4735 return;
4736 }
4737
4738 /* We stepped out of the stepping range. */
4739
4740 /* If we are stepping at the source level and entered the runtime
4741 loader dynamic symbol resolution code...
4742
4743 EXEC_FORWARD: we keep on single stepping until we exit the run
4744 time loader code and reach the callee's address.
4745
4746 EXEC_REVERSE: we've already executed the callee (backward), and
4747 the runtime loader code is handled just like any other
4748 undebuggable function call. Now we need only keep stepping
4749 backward through the trampoline code, and that's handled further
4750 down, so there is nothing for us to do here. */
4751
4752 if (execution_direction != EXEC_REVERSE
4753 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4754 && in_solib_dynsym_resolve_code (stop_pc))
4755 {
4756 CORE_ADDR pc_after_resolver =
4757 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4758
4759 if (debug_infrun)
4760 fprintf_unfiltered (gdb_stdlog,
4761 "infrun: stepped into dynsym resolve code\n");
4762
4763 if (pc_after_resolver)
4764 {
4765 /* Set up a step-resume breakpoint at the address
4766 indicated by SKIP_SOLIB_RESOLVER. */
4767 struct symtab_and_line sr_sal;
4768
4769 init_sal (&sr_sal);
4770 sr_sal.pc = pc_after_resolver;
4771 sr_sal.pspace = get_frame_program_space (frame);
4772
4773 insert_step_resume_breakpoint_at_sal (gdbarch,
4774 sr_sal, null_frame_id);
4775 }
4776
4777 keep_going (ecs);
4778 return;
4779 }
4780
4781 if (ecs->event_thread->control.step_range_end != 1
4782 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4783 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4784 && get_frame_type (frame) == SIGTRAMP_FRAME)
4785 {
4786 if (debug_infrun)
4787 fprintf_unfiltered (gdb_stdlog,
4788 "infrun: stepped into signal trampoline\n");
4789 /* The inferior, while doing a "step" or "next", has ended up in
4790 a signal trampoline (either by a signal being delivered or by
4791 the signal handler returning). Just single-step until the
4792 inferior leaves the trampoline (either by calling the handler
4793 or returning). */
4794 keep_going (ecs);
4795 return;
4796 }
4797
4798 /* Check for subroutine calls. The check for the current frame
4799 equalling the step ID is not necessary - the check of the
4800 previous frame's ID is sufficient - but it is a common case and
4801 cheaper than checking the previous frame's ID.
4802
4803 NOTE: frame_id_eq will never report two invalid frame IDs as
4804 being equal, so to get into this block, both the current and
4805 previous frame must have valid frame IDs. */
4806 /* The outer_frame_id check is a heuristic to detect stepping
4807 through startup code. If we step over an instruction which
4808 sets the stack pointer from an invalid value to a valid value,
4809 we may detect that as a subroutine call from the mythical
4810 "outermost" function. This could be fixed by marking
4811 outermost frames as !stack_p,code_p,special_p. Then the
4812 initial outermost frame, before sp was valid, would
4813 have code_addr == &_start. See the comment in frame_id_eq
4814 for more. */
4815 if (!frame_id_eq (get_stack_frame_id (frame),
4816 ecs->event_thread->control.step_stack_frame_id)
4817 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4818 ecs->event_thread->control.step_stack_frame_id)
4819 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4820 outer_frame_id)
4821 || step_start_function != find_pc_function (stop_pc))))
4822 {
4823 CORE_ADDR real_stop_pc;
4824
4825 if (debug_infrun)
4826 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4827
4828 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4829 || ((ecs->event_thread->control.step_range_end == 1)
4830 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4831 ecs->stop_func_start)))
4832 {
4833 /* I presume that step_over_calls is only 0 when we're
4834 supposed to be stepping at the assembly language level
4835 ("stepi"). Just stop. */
4836 /* Also, maybe we just did a "nexti" inside a prologue, so we
4837 thought it was a subroutine call but it was not. Stop as
4838 well. FENN */
4839 /* And this works the same backward as frontward. MVS */
4840 ecs->event_thread->control.stop_step = 1;
4841 print_end_stepping_range_reason ();
4842 stop_stepping (ecs);
4843 return;
4844 }
4845
4846 /* Reverse stepping through solib trampolines. */
4847
4848 if (execution_direction == EXEC_REVERSE
4849 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4850 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4851 || (ecs->stop_func_start == 0
4852 && in_solib_dynsym_resolve_code (stop_pc))))
4853 {
4854 /* Any solib trampoline code can be handled in reverse
4855 by simply continuing to single-step. We have already
4856 executed the solib function (backwards), and a few
4857 steps will take us back through the trampoline to the
4858 caller. */
4859 keep_going (ecs);
4860 return;
4861 }
4862
4863 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4864 {
4865 /* We're doing a "next".
4866
4867 Normal (forward) execution: set a breakpoint at the
4868 callee's return address (the address at which the caller
4869 will resume).
4870
4871 Reverse (backward) execution: set the step-resume
4872 breakpoint at the start of the function that we just
4873 stepped into (backwards), and continue to there. When we
4874 get there, we'll need to single-step back to the caller. */
4875
4876 if (execution_direction == EXEC_REVERSE)
4877 {
4878 struct symtab_and_line sr_sal;
4879
4880 /* Normal function call return (static or dynamic). */
4881 init_sal (&sr_sal);
4882 sr_sal.pc = ecs->stop_func_start;
4883 sr_sal.pspace = get_frame_program_space (frame);
4884 insert_step_resume_breakpoint_at_sal (gdbarch,
4885 sr_sal, null_frame_id);
4886 }
4887 else
4888 insert_step_resume_breakpoint_at_caller (frame);
4889
4890 keep_going (ecs);
4891 return;
4892 }
4893
4894 /* If we are in a function call trampoline (a stub between the
4895 calling routine and the real function), locate the real
4896 function. That's what tells us (a) whether we want to step
4897 into it at all, and (b) what prologue we want to run to the
4898 end of, if we do step into it. */
4899 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4900 if (real_stop_pc == 0)
4901 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4902 if (real_stop_pc != 0)
4903 ecs->stop_func_start = real_stop_pc;
4904
4905 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4906 {
4907 struct symtab_and_line sr_sal;
4908
4909 init_sal (&sr_sal);
4910 sr_sal.pc = ecs->stop_func_start;
4911 sr_sal.pspace = get_frame_program_space (frame);
4912
4913 insert_step_resume_breakpoint_at_sal (gdbarch,
4914 sr_sal, null_frame_id);
4915 keep_going (ecs);
4916 return;
4917 }
4918
4919 /* If we have line number information for the function we are
4920 thinking of stepping into and the function isn't on the skip
4921 list, step into it.
4922
4923 If there are several symtabs at that PC (e.g. with include
4924 	 files), we just want to know whether *any* of them have line
4925 numbers. find_pc_line handles this. */
4926 {
4927 struct symtab_and_line tmp_sal;
4928
4929 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4930 if (tmp_sal.line != 0
4931 && !function_pc_is_marked_for_skip (ecs->stop_func_start))
4932 {
4933 if (execution_direction == EXEC_REVERSE)
4934 handle_step_into_function_backward (gdbarch, ecs);
4935 else
4936 handle_step_into_function (gdbarch, ecs);
4937 return;
4938 }
4939 }
4940
4941       /* If we have no line number and step-stop-if-no-debug is set,
4942 	 we stop the step so that the user has a chance to switch to
4943 	 stepping at the assembly level.  */
4944 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4945 && step_stop_if_no_debug)
4946 {
4947 ecs->event_thread->control.stop_step = 1;
4948 print_end_stepping_range_reason ();
4949 stop_stepping (ecs);
4950 return;
4951 }
4952
4953 if (execution_direction == EXEC_REVERSE)
4954 {
4955 /* Set a breakpoint at callee's start address.
4956 From there we can step once and be back in the caller. */
4957 struct symtab_and_line sr_sal;
4958
4959 init_sal (&sr_sal);
4960 sr_sal.pc = ecs->stop_func_start;
4961 sr_sal.pspace = get_frame_program_space (frame);
4962 insert_step_resume_breakpoint_at_sal (gdbarch,
4963 sr_sal, null_frame_id);
4964 }
4965 else
4966 /* Set a breakpoint at callee's return address (the address
4967 at which the caller will resume). */
4968 insert_step_resume_breakpoint_at_caller (frame);
4969
4970 keep_going (ecs);
4971 return;
4972 }
4973
4974 /* Reverse stepping through solib trampolines. */
4975
4976 if (execution_direction == EXEC_REVERSE
4977 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4978 {
4979 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4980 || (ecs->stop_func_start == 0
4981 && in_solib_dynsym_resolve_code (stop_pc)))
4982 {
4983 /* Any solib trampoline code can be handled in reverse
4984 by simply continuing to single-step. We have already
4985 executed the solib function (backwards), and a few
4986 steps will take us back through the trampoline to the
4987 caller. */
4988 keep_going (ecs);
4989 return;
4990 }
4991 else if (in_solib_dynsym_resolve_code (stop_pc))
4992 {
4993 /* Stepped backward into the solib dynsym resolver.
4994 Set a breakpoint at its start and continue, then
4995 one more step will take us out. */
4996 struct symtab_and_line sr_sal;
4997
4998 init_sal (&sr_sal);
4999 sr_sal.pc = ecs->stop_func_start;
5000 sr_sal.pspace = get_frame_program_space (frame);
5001 insert_step_resume_breakpoint_at_sal (gdbarch,
5002 sr_sal, null_frame_id);
5003 keep_going (ecs);
5004 return;
5005 }
5006 }
5007
5008 /* If we're in the return path from a shared library trampoline,
5009 we want to proceed through the trampoline when stepping. */
5010 if (gdbarch_in_solib_return_trampoline (gdbarch,
5011 stop_pc, ecs->stop_func_name))
5012 {
5013 /* Determine where this trampoline returns. */
5014 CORE_ADDR real_stop_pc;
5015
5016 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5017
5018 if (debug_infrun)
5019 fprintf_unfiltered (gdb_stdlog,
5020 "infrun: stepped into solib return tramp\n");
5021
5022 /* Only proceed through if we know where it's going. */
5023 if (real_stop_pc)
5024 {
5025 /* And put the step-breakpoint there and go until there. */
5026 struct symtab_and_line sr_sal;
5027
5028 init_sal (&sr_sal); /* initialize to zeroes */
5029 sr_sal.pc = real_stop_pc;
5030 sr_sal.section = find_pc_overlay (sr_sal.pc);
5031 sr_sal.pspace = get_frame_program_space (frame);
5032
5033 /* Do not specify what the fp should be when we stop since
5034 on some machines the prologue is where the new fp value
5035 is established. */
5036 insert_step_resume_breakpoint_at_sal (gdbarch,
5037 sr_sal, null_frame_id);
5038
5039 /* Restart without fiddling with the step ranges or
5040 other state. */
5041 keep_going (ecs);
5042 return;
5043 }
5044 }
5045
5046 stop_pc_sal = find_pc_line (stop_pc, 0);
5047
5048 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5049      the trampoline processing logic; however, there are some trampolines
5050 that have no names, so we should do trampoline handling first. */
5051 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5052 && ecs->stop_func_name == NULL
5053 && stop_pc_sal.line == 0)
5054 {
5055 if (debug_infrun)
5056 fprintf_unfiltered (gdb_stdlog,
5057 "infrun: stepped into undebuggable function\n");
5058
5059 /* The inferior just stepped into, or returned to, an
5060 undebuggable function (where there is no debugging information
5061 and no line number corresponding to the address where the
5062 inferior stopped). Since we want to skip this kind of code,
5063 we keep going until the inferior returns from this
5064 function - unless the user has asked us not to (via
5065 set step-mode) or we no longer know how to get back
5066 to the call site. */
5067 if (step_stop_if_no_debug
5068 || !frame_id_p (frame_unwind_caller_id (frame)))
5069 {
5070 	  /* If we have no line number and step-stop-if-no-debug
5071 	     is set, we stop the step so that the user has a chance to
5072 	     switch to stepping at the assembly level.  */
5073 ecs->event_thread->control.stop_step = 1;
5074 print_end_stepping_range_reason ();
5075 stop_stepping (ecs);
5076 return;
5077 }
5078 else
5079 {
5080 /* Set a breakpoint at callee's return address (the address
5081 at which the caller will resume). */
5082 insert_step_resume_breakpoint_at_caller (frame);
5083 keep_going (ecs);
5084 return;
5085 }
5086 }
5087
5088 if (ecs->event_thread->control.step_range_end == 1)
5089 {
5090 /* It is stepi or nexti. We always want to stop stepping after
5091 one instruction. */
5092 if (debug_infrun)
5093 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5094 ecs->event_thread->control.stop_step = 1;
5095 print_end_stepping_range_reason ();
5096 stop_stepping (ecs);
5097 return;
5098 }
5099
5100 if (stop_pc_sal.line == 0)
5101 {
5102 /* We have no line number information. That means to stop
5103 stepping (does this always happen right after one instruction,
5104 when we do "s" in a function with no line numbers,
5105 or can this happen as a result of a return or longjmp?). */
5106 if (debug_infrun)
5107 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5108 ecs->event_thread->control.stop_step = 1;
5109 print_end_stepping_range_reason ();
5110 stop_stepping (ecs);
5111 return;
5112 }
5113
5114 /* Look for "calls" to inlined functions, part one. If the inline
5115 frame machinery detected some skipped call sites, we have entered
5116 a new inline function. */
5117
5118 if (frame_id_eq (get_frame_id (get_current_frame ()),
5119 ecs->event_thread->control.step_frame_id)
5120 && inline_skipped_frames (ecs->ptid))
5121 {
5122 struct symtab_and_line call_sal;
5123
5124 if (debug_infrun)
5125 fprintf_unfiltered (gdb_stdlog,
5126 "infrun: stepped into inlined function\n");
5127
5128 find_frame_sal (get_current_frame (), &call_sal);
5129
5130 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5131 {
5132 /* For "step", we're going to stop. But if the call site
5133 for this inlined function is on the same source line as
5134 we were previously stepping, go down into the function
5135 first. Otherwise stop at the call site. */
5136
5137 if (call_sal.line == ecs->event_thread->current_line
5138 && call_sal.symtab == ecs->event_thread->current_symtab)
5139 step_into_inline_frame (ecs->ptid);
5140
5141 ecs->event_thread->control.stop_step = 1;
5142 print_end_stepping_range_reason ();
5143 stop_stepping (ecs);
5144 return;
5145 }
5146 else
5147 {
5148 /* For "next", we should stop at the call site if it is on a
5149 different source line. Otherwise continue through the
5150 inlined function. */
5151 if (call_sal.line == ecs->event_thread->current_line
5152 && call_sal.symtab == ecs->event_thread->current_symtab)
5153 keep_going (ecs);
5154 else
5155 {
5156 ecs->event_thread->control.stop_step = 1;
5157 print_end_stepping_range_reason ();
5158 stop_stepping (ecs);
5159 }
5160 return;
5161 }
5162 }
5163
5164 /* Look for "calls" to inlined functions, part two. If we are still
5165 in the same real function we were stepping through, but we have
5166 to go further up to find the exact frame ID, we are stepping
5167 through a more inlined call beyond its call site. */
5168
5169 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5170 && !frame_id_eq (get_frame_id (get_current_frame ()),
5171 ecs->event_thread->control.step_frame_id)
5172 && stepped_in_from (get_current_frame (),
5173 ecs->event_thread->control.step_frame_id))
5174 {
5175 if (debug_infrun)
5176 fprintf_unfiltered (gdb_stdlog,
5177 "infrun: stepping through inlined function\n");
5178
5179 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5180 keep_going (ecs);
5181 else
5182 {
5183 ecs->event_thread->control.stop_step = 1;
5184 print_end_stepping_range_reason ();
5185 stop_stepping (ecs);
5186 }
5187 return;
5188 }
5189
5190 if ((stop_pc == stop_pc_sal.pc)
5191 && (ecs->event_thread->current_line != stop_pc_sal.line
5192 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5193 {
5194 /* We are at the start of a different line. So stop. Note that
5195 we don't stop if we step into the middle of a different line.
5196 That is said to make things like for (;;) statements work
5197 better. */
5198 if (debug_infrun)
5199 fprintf_unfiltered (gdb_stdlog,
5200 "infrun: stepped to a different line\n");
5201 ecs->event_thread->control.stop_step = 1;
5202 print_end_stepping_range_reason ();
5203 stop_stepping (ecs);
5204 return;
5205 }
5206
5207 /* We aren't done stepping.
5208
5209 Optimize by setting the stepping range to the line.
5210 (We might not be in the original line, but if we entered a
5211 new line in mid-statement, we continue stepping. This makes
5212 things like for(;;) statements work better.) */
5213
5214 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5215 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5216 set_step_info (frame, stop_pc_sal);
5217
5218 if (debug_infrun)
5219 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5220 keep_going (ecs);
5221 }
5222
5223 /* Is thread TP in the middle of single-stepping? */
5224
5225 static int
5226 currently_stepping (struct thread_info *tp)
5227 {
5228 return ((tp->control.step_range_end
5229 && tp->control.step_resume_breakpoint == NULL)
5230 || tp->control.trap_expected
5231 || bpstat_should_step ());
5232 }
5233
5234 /* Returns true if any thread *but* the one passed in "data" is in the
5235 middle of stepping or of handling a "next". */
5236
5237 static int
5238 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5239 {
5240 if (tp == data)
5241 return 0;
5242
5243 return (tp->control.step_range_end
5244 || tp->control.trap_expected);
5245 }
5246
5247 /* Inferior has stepped into a subroutine call with source code that
5248 we should not step over. Do step to the first line of code in
5249 it. */
5250
5251 static void
5252 handle_step_into_function (struct gdbarch *gdbarch,
5253 struct execution_control_state *ecs)
5254 {
5255 struct symtab *s;
5256 struct symtab_and_line stop_func_sal, sr_sal;
5257
5258 fill_in_stop_func (gdbarch, ecs);
5259
5260 s = find_pc_symtab (stop_pc);
5261 if (s && s->language != language_asm)
5262 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5263 ecs->stop_func_start);
5264
5265 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5266 /* Use the step_resume_break to step until the end of the prologue,
5267 even if that involves jumps (as it seems to on the vax under
5268 4.2). */
5269 /* If the prologue ends in the middle of a source line, continue to
5270 the end of that source line (if it is still within the function).
5271 Otherwise, just go to end of prologue. */
5272 if (stop_func_sal.end
5273 && stop_func_sal.pc != ecs->stop_func_start
5274 && stop_func_sal.end < ecs->stop_func_end)
5275 ecs->stop_func_start = stop_func_sal.end;
5276
5277 /* Architectures which require breakpoint adjustment might not be able
5278 to place a breakpoint at the computed address. If so, the test
5279 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5280 ecs->stop_func_start to an address at which a breakpoint may be
5281 legitimately placed.
5282
5283 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5284 made, GDB will enter an infinite loop when stepping through
5285 optimized code consisting of VLIW instructions which contain
5286 subinstructions corresponding to different source lines. On
5287 FR-V, it's not permitted to place a breakpoint on any but the
5288 first subinstruction of a VLIW instruction. When a breakpoint is
5289 set, GDB will adjust the breakpoint address to the beginning of
5290 the VLIW instruction. Thus, we need to make the corresponding
5291 adjustment here when computing the stop address. */
5292
5293 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5294 {
5295 ecs->stop_func_start
5296 = gdbarch_adjust_breakpoint_address (gdbarch,
5297 ecs->stop_func_start);
5298 }
5299
5300 if (ecs->stop_func_start == stop_pc)
5301 {
5302 /* We are already there: stop now. */
5303 ecs->event_thread->control.stop_step = 1;
5304 print_end_stepping_range_reason ();
5305 stop_stepping (ecs);
5306 return;
5307 }
5308 else
5309 {
5310 /* Put the step-breakpoint there and go until there. */
5311 init_sal (&sr_sal); /* initialize to zeroes */
5312 sr_sal.pc = ecs->stop_func_start;
5313 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5314 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5315
5316 /* Do not specify what the fp should be when we stop since on
5317 some machines the prologue is where the new fp value is
5318 established. */
5319 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5320
5321 /* And make sure stepping stops right away then. */
5322 ecs->event_thread->control.step_range_end
5323 = ecs->event_thread->control.step_range_start;
5324 }
5325 keep_going (ecs);
5326 }
5327
5328 /* Inferior has stepped backward into a subroutine call with source
5329 code that we should not step over. Do step to the beginning of the
5330 last line of code in it. */
5331
5332 static void
5333 handle_step_into_function_backward (struct gdbarch *gdbarch,
5334 struct execution_control_state *ecs)
5335 {
5336 struct symtab *s;
5337 struct symtab_and_line stop_func_sal;
5338
5339 fill_in_stop_func (gdbarch, ecs);
5340
5341 s = find_pc_symtab (stop_pc);
5342 if (s && s->language != language_asm)
5343 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5344 ecs->stop_func_start);
5345
5346 stop_func_sal = find_pc_line (stop_pc, 0);
5347
5348 /* OK, we're just going to keep stepping here. */
5349 if (stop_func_sal.pc == stop_pc)
5350 {
5351 /* We're there already. Just stop stepping now. */
5352 ecs->event_thread->control.stop_step = 1;
5353 print_end_stepping_range_reason ();
5354 stop_stepping (ecs);
5355 }
5356 else
5357 {
5358 /* Else just reset the step range and keep going.
5359 	 No step-resume breakpoint; they don't work for
5360 epilogues, which can have multiple entry paths. */
5361 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5362 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5363 keep_going (ecs);
5364 }
5365 return;
5366 }
5367
5368 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5369    This is used both to skip over functions and to skip over other
   code (such as trampolines and prologues).  */
5370
5371 static void
5372 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5373 struct symtab_and_line sr_sal,
5374 struct frame_id sr_id,
5375 enum bptype sr_type)
5376 {
5377 /* There should never be more than one step-resume or longjmp-resume
5378 breakpoint per thread, so we should never be setting a new
5379 step_resume_breakpoint when one is already active. */
5380 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5381 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5382
5383 if (debug_infrun)
5384 fprintf_unfiltered (gdb_stdlog,
5385 "infrun: inserting step-resume breakpoint at %s\n",
5386 paddress (gdbarch, sr_sal.pc));
5387
5388 inferior_thread ()->control.step_resume_breakpoint
5389 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5390 }
5391
5392 void
5393 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5394 struct symtab_and_line sr_sal,
5395 struct frame_id sr_id)
5396 {
5397 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5398 sr_sal, sr_id,
5399 bp_step_resume);
5400 }
5401
5402 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5403 This is used to skip a potential signal handler.
5404
5405 This is called with the interrupted function's frame. The signal
5406 handler, when it returns, will resume the interrupted function at
5407 RETURN_FRAME.pc. */
5408
5409 static void
5410 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5411 {
5412 struct symtab_and_line sr_sal;
5413 struct gdbarch *gdbarch;
5414
5415 gdb_assert (return_frame != NULL);
5416 init_sal (&sr_sal); /* initialize to zeros */
5417
5418 gdbarch = get_frame_arch (return_frame);
5419 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5420 sr_sal.section = find_pc_overlay (sr_sal.pc);
5421 sr_sal.pspace = get_frame_program_space (return_frame);
5422
5423 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5424 get_stack_frame_id (return_frame),
5425 bp_hp_step_resume);
5426 }
5427
5428 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5429 is used to skip a function after stepping into it (for "next" or if
5430 the called function has no debugging information).
5431
5432 The current function has almost always been reached by single
5433 stepping a call or return instruction. NEXT_FRAME belongs to the
5434 current function, and the breakpoint will be set at the caller's
5435 resume address.
5436
5437 This is a separate function rather than reusing
5438 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5439 get_prev_frame, which may stop prematurely (see the implementation
5440 of frame_unwind_caller_id for an example). */
5441
5442 static void
5443 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5444 {
5445 struct symtab_and_line sr_sal;
5446 struct gdbarch *gdbarch;
5447
5448 /* We shouldn't have gotten here if we don't know where the call site
5449 is. */
5450 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5451
5452 init_sal (&sr_sal); /* initialize to zeros */
5453
5454 gdbarch = frame_unwind_caller_arch (next_frame);
5455 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5456 frame_unwind_caller_pc (next_frame));
5457 sr_sal.section = find_pc_overlay (sr_sal.pc);
5458 sr_sal.pspace = frame_unwind_program_space (next_frame);
5459
5460 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5461 frame_unwind_caller_id (next_frame));
5462 }
5463
5464 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5465 new breakpoint at the target of a jmp_buf. The handling of
5466 longjmp-resume uses the same mechanisms used for handling
5467 "step-resume" breakpoints. */
5468
5469 static void
5470 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5471 {
5472 /* There should never be more than one step-resume or longjmp-resume
5473 breakpoint per thread, so we should never be setting a new
5474 longjmp_resume_breakpoint when one is already active. */
5475 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5476
5477 if (debug_infrun)
5478 fprintf_unfiltered (gdb_stdlog,
5479 "infrun: inserting longjmp-resume breakpoint at %s\n",
5480 paddress (gdbarch, pc));
5481
5482 inferior_thread ()->control.step_resume_breakpoint =
5483 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5484 }
5485
5486 /* Insert an exception resume breakpoint. TP is the thread throwing
5487 the exception. The block B is the block of the unwinder debug hook
5488 function. FRAME is the frame corresponding to the call to this
5489 function. SYM is the symbol of the function argument holding the
5490 target PC of the exception. */
5491
5492 static void
5493 insert_exception_resume_breakpoint (struct thread_info *tp,
5494 struct block *b,
5495 struct frame_info *frame,
5496 struct symbol *sym)
5497 {
5498 volatile struct gdb_exception e;
5499
5500 /* We want to ignore errors here. */
5501 TRY_CATCH (e, RETURN_MASK_ERROR)
5502 {
5503 struct symbol *vsym;
5504 struct value *value;
5505 CORE_ADDR handler;
5506 struct breakpoint *bp;
5507
5508 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5509 value = read_var_value (vsym, frame);
5510 /* If the value was optimized out, revert to the old behavior. */
5511 if (! value_optimized_out (value))
5512 {
5513 handler = value_as_address (value);
5514
5515 if (debug_infrun)
5516 fprintf_unfiltered (gdb_stdlog,
5517 "infrun: exception resume at %lx\n",
5518 (unsigned long) handler);
5519
5520 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5521 handler, bp_exception_resume);
5522 bp->thread = tp->num;
5523 inferior_thread ()->control.exception_resume_breakpoint = bp;
5524 }
5525 }
5526 }
5527
5528 /* This is called when an exception has been intercepted. Check to
5529 see whether the exception's destination is of interest, and if so,
5530 set an exception resume breakpoint there. */
5531
5532 static void
5533 check_exception_resume (struct execution_control_state *ecs,
5534 struct frame_info *frame, struct symbol *func)
5535 {
5536 volatile struct gdb_exception e;
5537
5538 TRY_CATCH (e, RETURN_MASK_ERROR)
5539 {
5540 struct block *b;
5541 struct dict_iterator iter;
5542 struct symbol *sym;
5543 int argno = 0;
5544
5545 /* The exception breakpoint is a thread-specific breakpoint on
5546 the unwinder's debug hook, declared as:
5547
5548 void _Unwind_DebugHook (void *cfa, void *handler);
5549
5550 The CFA argument indicates the frame to which control is
5551 about to be transferred. HANDLER is the destination PC.
5552
5553 We ignore the CFA and set a temporary breakpoint at HANDLER.
5554 This is not extremely efficient but it avoids issues in gdb
5555 with computing the DWARF CFA, and it also works even in weird
5556 cases such as throwing an exception from inside a signal
5557 handler. */
5558
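    /* Scan the hook's formal arguments below: the first one (the CFA)
       is skipped, and the second one (HANDLER) is where the momentary
       exception-resume breakpoint is planted.  */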
5559 b = SYMBOL_BLOCK_VALUE (func);
5560 ALL_BLOCK_SYMBOLS (b, iter, sym)
5561 {
5562 if (!SYMBOL_IS_ARGUMENT (sym))
5563 continue;
5564
5565 if (argno == 0)
5566 ++argno;
5567 else
5568 {
5569 insert_exception_resume_breakpoint (ecs->event_thread,
5570 b, frame, sym);
5571 break;
5572 }
5573 }
5574 }
5575 }
5576
5577 static void
5578 stop_stepping (struct execution_control_state *ecs)
5579 {
5580 if (debug_infrun)
5581 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5582
5583 /* Let callers know we don't want to wait for the inferior anymore. */
5584 ecs->wait_some_more = 0;
5585 }
5586
5587 /* This function handles various cases where we need to continue
5588 waiting for the inferior. */
5589 /* (Used to be the keep_going: label in the old wait_for_inferior). */
5590
5591 static void
5592 keep_going (struct execution_control_state *ecs)
5593 {
5594 /* Make sure normal_stop is called if we get a QUIT handled before
5595 reaching resume. */
5596 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5597
5598 /* Save the pc before execution, to compare with pc after stop. */
5599 ecs->event_thread->prev_pc
5600 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5601
5602 /* If we did not do break;, it means we should keep running the
5603      inferior and not return to the debugger.  */
5604
5605 if (ecs->event_thread->control.trap_expected
5606 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
5607 {
5608 /* We took a signal (which we are supposed to pass through to
5609 the inferior, else we'd not get here) and we haven't yet
5610 gotten our trap. Simply continue. */
5611
5612 discard_cleanups (old_cleanups);
5613 resume (currently_stepping (ecs->event_thread),
5614 ecs->event_thread->suspend.stop_signal);
5615 }
5616 else
5617 {
5618 /* Either the trap was not expected, but we are continuing
5619 anyway (the user asked that this signal be passed to the
5620 child)
5621 -- or --
5622 The signal was SIGTRAP, e.g. it was our signal, but we
5623 decided we should resume from it.
5624
5625 We're going to run this baby now!
5626
5627 Note that insert_breakpoints won't try to re-insert
5628 already inserted breakpoints. Therefore, we don't
5629 care if breakpoints were already inserted, or not. */
5630
5631 if (ecs->event_thread->stepping_over_breakpoint)
5632 {
5633 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5634
5635 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5636 /* Since we can't do a displaced step, we have to remove
5637 the breakpoint while we step it. To keep things
5638 simple, we remove them all. */
5639 remove_breakpoints ();
5640 }
5641 else
5642 {
5643 volatile struct gdb_exception e;
5644
5645 /* Stop stepping when inserting breakpoints
5646 has failed. */
5647 TRY_CATCH (e, RETURN_MASK_ERROR)
5648 {
5649 insert_breakpoints ();
5650 }
5651 if (e.reason < 0)
5652 {
5653 exception_print (gdb_stderr, e);
5654 stop_stepping (ecs);
5655 return;
5656 }
5657 }
5658
5659 ecs->event_thread->control.trap_expected
5660 = ecs->event_thread->stepping_over_breakpoint;
5661
5662 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5663 specifies that such a signal should be delivered to the
5664 target program).
5665
5666 	 Typically, this would occur when a user is debugging a
5667 	 target monitor on a simulator: the target monitor sets a
5668 	 breakpoint; the simulator encounters this breakpoint and
5669 	 halts the simulation, handing control to GDB; GDB, noting
5670 	 that the breakpoint isn't valid, returns control back to the
5671 simulator; the simulator then delivers the hardware
5672 equivalent of a SIGNAL_TRAP to the program being debugged. */
5673
5674 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
5675 && !signal_program[ecs->event_thread->suspend.stop_signal])
5676 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
5677
5678 discard_cleanups (old_cleanups);
5679 resume (currently_stepping (ecs->event_thread),
5680 ecs->event_thread->suspend.stop_signal);
5681 }
5682
5683 prepare_to_wait (ecs);
5684 }
5685
5686 /* This function normally comes after a resume, before
5687 handle_inferior_event exits. It takes care of any last bits of
5688 housekeeping, and sets the all-important wait_some_more flag. */
5689
5690 static void
5691 prepare_to_wait (struct execution_control_state *ecs)
5692 {
5693 if (debug_infrun)
5694 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5695
5696 /* This is the old end of the while loop. Let everybody know we
5697 want to wait for the inferior some more and get called again
5698 soon. */
5699 ecs->wait_some_more = 1;
5700 }
5701
5702 /* Several print_*_reason functions to print why the inferior has stopped.
5703 We always print something when the inferior exits, or receives a signal.
5704 The rest of the cases are dealt with later on in normal_stop and
5705 print_it_typical. Ideally there should be a call to one of these
5706    print_*_reason functions from handle_inferior_event each time
5707 stop_stepping is called. */
5708
5709 /* Print why the inferior has stopped.
5710    We are done with a step/next/si/ni command, print why the inferior has
5711    stopped.  On the CLI we print nothing; for MI we emit the stop reason,
5712    but only if we are not in the middle of doing a "step n" operation for n > 1.  */
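/* For MI consumers this contributes the "reason" field of the *stopped
   record, e.g. (illustrative output only):

     *stopped,reason="end-stepping-range",...  */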
5713
5714 static void
5715 print_end_stepping_range_reason (void)
5716 {
5717 if ((!inferior_thread ()->step_multi
5718 || !inferior_thread ()->control.stop_step)
5719 && ui_out_is_mi_like_p (current_uiout))
5720 ui_out_field_string (current_uiout, "reason",
5721 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5722 }
5723
5724 /* The inferior was terminated by a signal, print why it stopped. */
5725
5726 static void
5727 print_signal_exited_reason (enum target_signal siggnal)
5728 {
5729 struct ui_out *uiout = current_uiout;
5730
5731 annotate_signalled ();
5732 if (ui_out_is_mi_like_p (uiout))
5733 ui_out_field_string
5734 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5735 ui_out_text (uiout, "\nProgram terminated with signal ");
5736 annotate_signal_name ();
5737 ui_out_field_string (uiout, "signal-name",
5738 target_signal_to_name (siggnal));
5739 annotate_signal_name_end ();
5740 ui_out_text (uiout, ", ");
5741 annotate_signal_string ();
5742 ui_out_field_string (uiout, "signal-meaning",
5743 target_signal_to_string (siggnal));
5744 annotate_signal_string_end ();
5745 ui_out_text (uiout, ".\n");
5746 ui_out_text (uiout, "The program no longer exists.\n");
5747 }
5748
5749 /* The inferior program is finished, print why it stopped. */
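/* For example, the CLI output produced below looks like (illustrative):

     [Inferior 1 (process 1234) exited with code 01]
     [Inferior 1 (process 1234) exited normally]

   depending on whether the exit status was nonzero (printed in octal)
   or zero.  */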
5750
5751 static void
5752 print_exited_reason (int exitstatus)
5753 {
5754 struct inferior *inf = current_inferior ();
5755 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5756 struct ui_out *uiout = current_uiout;
5757
5758 annotate_exited (exitstatus);
5759 if (exitstatus)
5760 {
5761 if (ui_out_is_mi_like_p (uiout))
5762 ui_out_field_string (uiout, "reason",
5763 async_reason_lookup (EXEC_ASYNC_EXITED));
5764 ui_out_text (uiout, "[Inferior ");
5765 ui_out_text (uiout, plongest (inf->num));
5766 ui_out_text (uiout, " (");
5767 ui_out_text (uiout, pidstr);
5768 ui_out_text (uiout, ") exited with code ");
5769 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5770 ui_out_text (uiout, "]\n");
5771 }
5772 else
5773 {
5774 if (ui_out_is_mi_like_p (uiout))
5775 ui_out_field_string
5776 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5777 ui_out_text (uiout, "[Inferior ");
5778 ui_out_text (uiout, plongest (inf->num));
5779 ui_out_text (uiout, " (");
5780 ui_out_text (uiout, pidstr);
5781 ui_out_text (uiout, ") exited normally]\n");
5782 }
5783 /* Support the --return-child-result option. */
5784 return_child_result_value = exitstatus;
5785 }
5786
5787 /* A signal was received; print why the inferior has stopped.  We only
5788    get here when the signal table tells us to print about it.  */
5789
5790 static void
5791 print_signal_received_reason (enum target_signal siggnal)
5792 {
5793 struct ui_out *uiout = current_uiout;
5794
5795 annotate_signal ();
5796
5797 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5798 {
5799 struct thread_info *t = inferior_thread ();
5800
5801 ui_out_text (uiout, "\n[");
5802 ui_out_field_string (uiout, "thread-name",
5803 target_pid_to_str (t->ptid));
5804 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5805 ui_out_text (uiout, " stopped");
5806 }
5807 else
5808 {
5809 ui_out_text (uiout, "\nProgram received signal ");
5810 annotate_signal_name ();
5811 if (ui_out_is_mi_like_p (uiout))
5812 ui_out_field_string
5813 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5814 ui_out_field_string (uiout, "signal-name",
5815 target_signal_to_name (siggnal));
5816 annotate_signal_name_end ();
5817 ui_out_text (uiout, ", ");
5818 annotate_signal_string ();
5819 ui_out_field_string (uiout, "signal-meaning",
5820 target_signal_to_string (siggnal));
5821 annotate_signal_string_end ();
5822 }
5823 ui_out_text (uiout, ".\n");
5824 }
5825
5826 /* Reverse execution: target ran out of history info, print why the inferior
5827 has stopped. */
5828
5829 static void
5830 print_no_history_reason (void)
5831 {
5832 ui_out_text (current_uiout, "\nNo more reverse-execution history.\n");
5833 }
5834
5835 /* Here to return control to GDB when the inferior stops for real.
5836 Print appropriate messages, remove breakpoints, give terminal our modes.
5837
5838 STOP_PRINT_FRAME nonzero means print the executing frame
5839 (pc, function, args, file, line number and line text).
5840 BREAKPOINTS_FAILED nonzero means stop was due to error
5841 attempting to insert breakpoints. */
5842
5843 void
5844 normal_stop (void)
5845 {
5846 struct target_waitstatus last;
5847 ptid_t last_ptid;
5848 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5849
5850 get_last_target_status (&last_ptid, &last);
5851
5852 /* If an exception is thrown from this point on, make sure to
5853 propagate GDB's knowledge of the executing state to the
5854 frontend/user running state. A QUIT is an easy exception to see
5855 here, so do this before any filtered output. */
5856 if (!non_stop)
5857 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5858 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5859 && last.kind != TARGET_WAITKIND_EXITED
5860 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5861 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5862
5863 /* In non-stop mode, we don't want GDB to switch threads behind the
5864 user's back, to avoid races where the user is typing a command to
5865 apply to thread x, but GDB switches to thread y before the user
5866 finishes entering the command. */
5867
5868 /* As with the notification of thread events, we want to delay
5869 notifying the user that we've switched thread context until
5870 the inferior actually stops.
5871
5872 There's no point in saying anything if the inferior has exited.
5873 Note that SIGNALLED here means "exited with a signal", not
5874 "received a signal". */
5875 if (!non_stop
5876 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5877 && target_has_execution
5878 && last.kind != TARGET_WAITKIND_SIGNALLED
5879 && last.kind != TARGET_WAITKIND_EXITED
5880 && last.kind != TARGET_WAITKIND_NO_RESUMED)
5881 {
5882 target_terminal_ours_for_output ();
5883 printf_filtered (_("[Switching to %s]\n"),
5884 target_pid_to_str (inferior_ptid));
5885 annotate_thread_changed ();
5886 previous_inferior_ptid = inferior_ptid;
5887 }
5888
5889 if (last.kind == TARGET_WAITKIND_NO_RESUMED)
5890 {
5891 gdb_assert (sync_execution || !target_can_async_p ());
5892
5893 target_terminal_ours_for_output ();
5894 printf_filtered (_("No unwaited-for children left.\n"));
5895 }
5896
5897 if (!breakpoints_always_inserted_mode () && target_has_execution)
5898 {
5899 if (remove_breakpoints ())
5900 {
5901 target_terminal_ours_for_output ();
5902 printf_filtered (_("Cannot remove breakpoints because "
5903 "program is no longer writable.\nFurther "
5904 "execution is probably impossible.\n"));
5905 }
5906 }
5907
5908 /* If an auto-display called a function and that got a signal,
5909 delete that auto-display to avoid an infinite recursion. */
5910
5911 if (stopped_by_random_signal)
5912 disable_current_display ();
5913
5914 /* Don't print a message if in the middle of doing a "step n"
5915 operation for n > 1 */
5916 if (target_has_execution
5917 && last.kind != TARGET_WAITKIND_SIGNALLED
5918 && last.kind != TARGET_WAITKIND_EXITED
5919 && inferior_thread ()->step_multi
5920 && inferior_thread ()->control.stop_step)
5921 goto done;
5922
5923 target_terminal_ours ();
5924 async_enable_stdin ();
5925
5926 /* Set the current source location. This will also happen if we
5927 display the frame below, but the current SAL will be incorrect
5928 during a user hook-stop function. */
5929 if (has_stack_frames () && !stop_stack_dummy)
5930 set_current_sal_from_frame (get_current_frame (), 1);
5931
5932 /* Let the user/frontend see the threads as stopped. */
5933 do_cleanups (old_chain);
5934
5935 /* Look up the hook_stop and run it (CLI internally handles problem
5936 of stop_command's pre-hook not existing). */
5937 if (stop_command)
5938 catch_errors (hook_stop_stub, stop_command,
5939 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5940
5941 if (!has_stack_frames ())
5942 goto done;
5943
5944 if (last.kind == TARGET_WAITKIND_SIGNALLED
5945 || last.kind == TARGET_WAITKIND_EXITED)
5946 goto done;
5947
5948 /* Select innermost stack frame - i.e., current frame is frame 0,
5949 and current location is based on that.
5950 Don't do this on return from a stack dummy routine,
5951 or if the program has exited. */
5952
5953 if (!stop_stack_dummy)
5954 {
5955 select_frame (get_current_frame ());
5956
5957 /* Print current location without a level number, if
5958 we have changed functions or hit a breakpoint.
5959 Print source line if we have one.
5960 bpstat_print() contains the logic deciding in detail
5961 what to print, based on the event(s) that just occurred. */
5962
5963       /* If --batch-silent is enabled then there's no need to print the current
5964 	 source location, and trying to do so risks causing an error message
5965 	 about missing source files.  */
5966 if (stop_print_frame && !batch_silent)
5967 {
5968 int bpstat_ret;
5969 int source_flag;
5970 int do_frame_printing = 1;
5971 struct thread_info *tp = inferior_thread ();
5972
5973 bpstat_ret = bpstat_print (tp->control.stop_bpstat, last.kind);
5974 switch (bpstat_ret)
5975 {
5976 case PRINT_UNKNOWN:
5977 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5978 (or should) carry around the function and does (or
5979 should) use that when doing a frame comparison. */
5980 if (tp->control.stop_step
5981 && frame_id_eq (tp->control.step_frame_id,
5982 get_frame_id (get_current_frame ()))
5983 && step_start_function == find_pc_function (stop_pc))
5984 source_flag = SRC_LINE; /* Finished step, just
5985 print source line. */
5986 else
5987 source_flag = SRC_AND_LOC; /* Print location and
5988 source line. */
5989 break;
5990 case PRINT_SRC_AND_LOC:
5991 source_flag = SRC_AND_LOC; /* Print location and
5992 source line. */
5993 break;
5994 case PRINT_SRC_ONLY:
5995 source_flag = SRC_LINE;
5996 break;
5997 case PRINT_NOTHING:
5998 source_flag = SRC_LINE; /* something bogus */
5999 do_frame_printing = 0;
6000 break;
6001 default:
6002 internal_error (__FILE__, __LINE__, _("Unknown value."));
6003 }
6004
6005 /* The behavior of this routine with respect to the source
6006 flag is:
6007 SRC_LINE: Print only source line
6008 LOCATION: Print only location
6009 SRC_AND_LOC: Print location and source line. */
6010 if (do_frame_printing)
6011 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
6012
6013 /* Display the auto-display expressions. */
6014 do_displays ();
6015 }
6016 }
6017
6018 /* Save the function value return registers, if we care.
6019 We might be about to restore their previous contents. */
6020 if (inferior_thread ()->control.proceed_to_finish
6021 && execution_direction != EXEC_REVERSE)
6022 {
6023 /* This should not be necessary. */
6024 if (stop_registers)
6025 regcache_xfree (stop_registers);
6026
6027 /* NB: The copy goes through to the target picking up the value of
6028 all the registers. */
6029 stop_registers = regcache_dup (get_current_regcache ());
6030 }
6031
6032 if (stop_stack_dummy == STOP_STACK_DUMMY)
6033 {
6034 /* Pop the empty frame that contains the stack dummy.
6035 This also restores inferior state prior to the call
6036 (struct infcall_suspend_state). */
6037 struct frame_info *frame = get_current_frame ();
6038
6039 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
6040 frame_pop (frame);
6041 /* frame_pop() calls reinit_frame_cache as the last thing it
6042 does which means there's currently no selected frame. We
6043 don't need to re-establish a selected frame if the dummy call
6044 returns normally, that will be done by
6045 restore_infcall_control_state. However, we do have to handle
6046 the case where the dummy call is returning after being
6047 stopped (e.g. the dummy call previously hit a breakpoint).
6048 We can't know which case we have so just always re-establish
6049 a selected frame here. */
6050 select_frame (get_current_frame ());
6051 }
6052
6053 done:
6054 annotate_stopped ();
6055
6056 /* Suppress the stop observer if we're in the middle of:
6057
6058      - a step n (n > 1), as there are still more steps to be done.
6059
6060 - a "finish" command, as the observer will be called in
6061 finish_command_continuation, so it can include the inferior
6062 function's return value.
6063
6064      - calling an inferior function, as we pretend the inferior didn't
6065 run at all. The return value of the call is handled by the
6066 expression evaluator, through call_function_by_hand. */
6067
6068 if (!target_has_execution
6069 || last.kind == TARGET_WAITKIND_SIGNALLED
6070 || last.kind == TARGET_WAITKIND_EXITED
6071 || last.kind == TARGET_WAITKIND_NO_RESUMED
6072 || (!(inferior_thread ()->step_multi
6073 && inferior_thread ()->control.stop_step)
6074 && !(inferior_thread ()->control.stop_bpstat
6075 && inferior_thread ()->control.proceed_to_finish)
6076 && !inferior_thread ()->control.in_infcall))
6077 {
6078 if (!ptid_equal (inferior_ptid, null_ptid))
6079 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
6080 stop_print_frame);
6081 else
6082 observer_notify_normal_stop (NULL, stop_print_frame);
6083 }
6084
6085 if (target_has_execution)
6086 {
6087 if (last.kind != TARGET_WAITKIND_SIGNALLED
6088 && last.kind != TARGET_WAITKIND_EXITED)
6089 /* Delete the breakpoint we stopped at, if it wants to be deleted.
6090 Delete any breakpoint that is to be deleted at the next stop. */
6091 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
6092 }
6093
6094 /* Try to get rid of automatically added inferiors that are no
6095 longer needed. Keeping those around slows down things linearly.
6096 Note that this never removes the current inferior. */
6097 prune_inferiors ();
6098 }
6099
6100 static int
6101 hook_stop_stub (void *cmd)
6102 {
6103 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
6104 return (0);
6105 }
6106 \f
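/* Accessors for the signal handling tables: whether GDB should stop,
   print, or pass signal SIGNO to the program, respectively.  */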
6107 int
6108 signal_stop_state (int signo)
6109 {
6110 return signal_stop[signo];
6111 }
6112
6113 int
6114 signal_print_state (int signo)
6115 {
6116 return signal_print[signo];
6117 }
6118
6119 int
6120 signal_pass_state (int signo)
6121 {
6122 return signal_program[signo];
6123 }
6124
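/* Update the cached signal_pass entry for SIGNO (all signals if SIGNO
   is -1).  A signal can be passed to the target without reporting only
   when GDB neither stops nor prints for it but does forward it to the
   program.  */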
6125 static void
6126 signal_cache_update (int signo)
6127 {
6128 if (signo == -1)
6129 {
6130 for (signo = 0; signo < (int) TARGET_SIGNAL_LAST; signo++)
6131 signal_cache_update (signo);
6132
6133 return;
6134 }
6135
6136 signal_pass[signo] = (signal_stop[signo] == 0
6137 && signal_print[signo] == 0
6138 && signal_program[signo] == 1);
6139 }
6140
6141 int
6142 signal_stop_update (int signo, int state)
6143 {
6144 int ret = signal_stop[signo];
6145
6146 signal_stop[signo] = state;
6147 signal_cache_update (signo);
6148 return ret;
6149 }
6150
6151 int
6152 signal_print_update (int signo, int state)
6153 {
6154 int ret = signal_print[signo];
6155
6156 signal_print[signo] = state;
6157 signal_cache_update (signo);
6158 return ret;
6159 }
6160
6161 int
6162 signal_pass_update (int signo, int state)
6163 {
6164 int ret = signal_program[signo];
6165
6166 signal_program[signo] = state;
6167 signal_cache_update (signo);
6168 return ret;
6169 }
6170
6171 static void
6172 sig_print_header (void)
6173 {
6174 printf_filtered (_("Signal Stop\tPrint\tPass "
6175 "to program\tDescription\n"));
6176 }
6177
6178 static void
6179 sig_print_info (enum target_signal oursig)
6180 {
6181 const char *name = target_signal_to_name (oursig);
6182 int name_padding = 13 - strlen (name);
6183
6184 if (name_padding <= 0)
6185 name_padding = 0;
6186
6187 printf_filtered ("%s", name);
6188 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6189 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6190 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6191 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6192 printf_filtered ("%s\n", target_signal_to_string (oursig));
6193 }
6194
6195 /* Specify how various signals in the inferior should be handled. */
6196
6197 static void
6198 handle_command (char *args, int from_tty)
6199 {
6200 char **argv;
6201 int digits, wordlen;
6202 int sigfirst, signum, siglast;
6203 enum target_signal oursig;
6204 int allsigs;
6205 int nsigs;
6206 unsigned char *sigs;
6207 struct cleanup *old_chain;
6208
6209 if (args == NULL)
6210 {
6211 error_no_arg (_("signal to handle"));
6212 }
6213
6214 /* Allocate and zero an array of flags for which signals to handle. */
6215
6216 nsigs = (int) TARGET_SIGNAL_LAST;
6217 sigs = (unsigned char *) alloca (nsigs);
6218 memset (sigs, 0, nsigs);
6219
6220 /* Break the command line up into args. */
6221
6222 argv = gdb_buildargv (args);
6223 old_chain = make_cleanup_freeargv (argv);
6224
6225   /* Walk through the args, looking for signal numbers, signal names, and
6226 actions. Signal numbers and signal names may be interspersed with
6227 actions, with the actions being performed for all signals cumulatively
6228 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
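  /* For example (illustrative invocations of the "handle" command):

       (gdb) handle SIGUSR1 nostop noprint pass
       (gdb) handle 14-15 stop print

     The first uses a symbolic signal name; the second applies the
     actions to a numeric range of GDB's internal signal numbers.  */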
6229
6230 while (*argv != NULL)
6231 {
6232 wordlen = strlen (*argv);
6233 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6234 {;
6235 }
6236 allsigs = 0;
6237 sigfirst = siglast = -1;
6238
6239 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6240 {
6241 /* Apply action to all signals except those used by the
6242 debugger. Silently skip those. */
6243 allsigs = 1;
6244 sigfirst = 0;
6245 siglast = nsigs - 1;
6246 }
6247 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6248 {
6249 SET_SIGS (nsigs, sigs, signal_stop);
6250 SET_SIGS (nsigs, sigs, signal_print);
6251 }
6252 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6253 {
6254 UNSET_SIGS (nsigs, sigs, signal_program);
6255 }
6256 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6257 {
6258 SET_SIGS (nsigs, sigs, signal_print);
6259 }
6260 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6261 {
6262 SET_SIGS (nsigs, sigs, signal_program);
6263 }
6264 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6265 {
6266 UNSET_SIGS (nsigs, sigs, signal_stop);
6267 }
6268 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6269 {
6270 SET_SIGS (nsigs, sigs, signal_program);
6271 }
6272 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6273 {
6274 UNSET_SIGS (nsigs, sigs, signal_print);
6275 UNSET_SIGS (nsigs, sigs, signal_stop);
6276 }
6277 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6278 {
6279 UNSET_SIGS (nsigs, sigs, signal_program);
6280 }
6281 else if (digits > 0)
6282 {
6283 /* It is numeric. The numeric signal refers to our own
6284 internal signal numbering from target.h, not to host/target
6285 signal number. This is a feature; users really should be
6286 using symbolic names anyway, and the common ones like
6287 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6288
6289 sigfirst = siglast = (int)
6290 target_signal_from_command (atoi (*argv));
6291 if ((*argv)[digits] == '-')
6292 {
6293 siglast = (int)
6294 target_signal_from_command (atoi ((*argv) + digits + 1));
6295 }
6296 if (sigfirst > siglast)
6297 {
6298 /* Bet he didn't figure we'd think of this case... */
6299 signum = sigfirst;
6300 sigfirst = siglast;
6301 siglast = signum;
6302 }
6303 }
6304 else
6305 {
6306 oursig = target_signal_from_name (*argv);
6307 if (oursig != TARGET_SIGNAL_UNKNOWN)
6308 {
6309 sigfirst = siglast = (int) oursig;
6310 }
6311 else
6312 {
6313 /* Not a number and not a recognized flag word => complain. */
6314 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6315 }
6316 }
6317
6318 /* If any signal numbers or symbol names were found, set flags for
6319 which signals to apply actions to. */
6320
6321 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6322 {
6323 switch ((enum target_signal) signum)
6324 {
6325 case TARGET_SIGNAL_TRAP:
6326 case TARGET_SIGNAL_INT:
6327 if (!allsigs && !sigs[signum])
6328 {
6329 if (query (_("%s is used by the debugger.\n\
6330 Are you sure you want to change it? "),
6331 target_signal_to_name ((enum target_signal) signum)))
6332 {
6333 sigs[signum] = 1;
6334 }
6335 else
6336 {
6337 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6338 gdb_flush (gdb_stdout);
6339 }
6340 }
6341 break;
6342 case TARGET_SIGNAL_0:
6343 case TARGET_SIGNAL_DEFAULT:
6344 case TARGET_SIGNAL_UNKNOWN:
6345 /* Make sure that "all" doesn't print these. */
6346 break;
6347 default:
6348 sigs[signum] = 1;
6349 break;
6350 }
6351 }
6352
6353 argv++;
6354 }
6355
6356 for (signum = 0; signum < nsigs; signum++)
6357 if (sigs[signum])
6358 {
6359 signal_cache_update (-1);
6360 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
6361
6362 if (from_tty)
6363 {
6364 /* Show the results. */
6365 sig_print_header ();
6366 for (; signum < nsigs; signum++)
6367 if (sigs[signum])
6368 sig_print_info (signum);
6369 }
6370
6371 break;
6372 }
6373
6374 do_cleanups (old_chain);
6375 }
6376
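/* Implement the XDB-compatible "handle" syntax: translate an XDB flag
   ("s" toggles stop/nostop, "i" toggles pass/nopass, "r" toggles
   print/noprint, "Q" means noprint) into the equivalent GDB "handle"
   command and run it.  */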
6377 static void
6378 xdb_handle_command (char *args, int from_tty)
6379 {
6380 char **argv;
6381 struct cleanup *old_chain;
6382
6383 if (args == NULL)
6384 error_no_arg (_("xdb command"));
6385
6386 /* Break the command line up into args. */
6387
6388 argv = gdb_buildargv (args);
6389 old_chain = make_cleanup_freeargv (argv);
6390 if (argv[1] != (char *) NULL)
6391 {
6392 char *argBuf;
6393 int bufLen;
6394
6395 bufLen = strlen (argv[0]) + 20;
6396 argBuf = (char *) xmalloc (bufLen);
6397 if (argBuf)
6398 {
6399 int validFlag = 1;
6400 enum target_signal oursig;
6401
6402 oursig = target_signal_from_name (argv[0]);
6403 memset (argBuf, 0, bufLen);
6404 if (strcmp (argv[1], "Q") == 0)
6405 sprintf (argBuf, "%s %s", argv[0], "noprint");
6406 else
6407 {
6408 if (strcmp (argv[1], "s") == 0)
6409 {
6410 if (!signal_stop[oursig])
6411 sprintf (argBuf, "%s %s", argv[0], "stop");
6412 else
6413 sprintf (argBuf, "%s %s", argv[0], "nostop");
6414 }
6415 else if (strcmp (argv[1], "i") == 0)
6416 {
6417 if (!signal_program[oursig])
6418 sprintf (argBuf, "%s %s", argv[0], "pass");
6419 else
6420 sprintf (argBuf, "%s %s", argv[0], "nopass");
6421 }
6422 else if (strcmp (argv[1], "r") == 0)
6423 {
6424 if (!signal_print[oursig])
6425 sprintf (argBuf, "%s %s", argv[0], "print");
6426 else
6427 sprintf (argBuf, "%s %s", argv[0], "noprint");
6428 }
6429 else
6430 validFlag = 0;
6431 }
6432 if (validFlag)
6433 handle_command (argBuf, from_tty);
6434 else
6435 printf_filtered (_("Invalid signal handling flag.\n"));
6436 if (argBuf)
6437 xfree (argBuf);
6438 }
6439 }
6440 do_cleanups (old_chain);
6441 }
6442
6443 /* Print current contents of the tables set by the handle command.
6444 It is possible we should just be printing signals actually used
6445 by the current target (but for things to work right when switching
6446 targets, all signals should be in the signal tables). */
6447
6448 static void
6449 signals_info (char *signum_exp, int from_tty)
6450 {
6451 enum target_signal oursig;
6452
6453 sig_print_header ();
6454
6455 if (signum_exp)
6456 {
6457 /* First see if this is a symbol name. */
6458 oursig = target_signal_from_name (signum_exp);
6459 if (oursig == TARGET_SIGNAL_UNKNOWN)
6460 {
6461 /* No, try numeric. */
6462 oursig =
6463 target_signal_from_command (parse_and_eval_long (signum_exp));
6464 }
6465 sig_print_info (oursig);
6466 return;
6467 }
6468
6469 printf_filtered ("\n");
6470 /* These ugly casts brought to you by the native VAX compiler. */
6471 for (oursig = TARGET_SIGNAL_FIRST;
6472 (int) oursig < (int) TARGET_SIGNAL_LAST;
6473 oursig = (enum target_signal) ((int) oursig + 1))
6474 {
6475 QUIT;
6476
6477 if (oursig != TARGET_SIGNAL_UNKNOWN
6478 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
6479 sig_print_info (oursig);
6480 }
6481
6482 printf_filtered (_("\nUse the \"handle\" command "
6483 "to change these tables.\n"));
6484 }
6485
6486 /* Check if it makes sense to read $_siginfo from the current thread
6487 at this point. If not, throw an error. */
6488
6489 static void
6490 validate_siginfo_access (void)
6491 {
6492 /* No current inferior, no siginfo. */
6493 if (ptid_equal (inferior_ptid, null_ptid))
6494 error (_("No thread selected."));
6495
6496 /* Don't try to read from a dead thread. */
6497 if (is_exited (inferior_ptid))
6498 error (_("The current thread has terminated"));
6499
6500 /* ... or from a spinning thread. */
6501 if (is_running (inferior_ptid))
6502 error (_("Selected thread is running."));
6503 }
6504
6505 /* The $_siginfo convenience variable is a bit special. We don't know
6506 for sure the type of the value until we actually have a chance to
6507 fetch the data. The type can change depending on gdbarch, so it is
6508    also dependent on which thread you have selected.  We handle this by:
6509
6510 1. making $_siginfo be an internalvar that creates a new value on
6511 access.
6512
6513 2. making the value of $_siginfo be an lval_computed value. */
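/* Typical use from the CLI (illustrative only; the exact member names
   depend on the architecture's siginfo type):

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr  */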
6514
6515 /* This function implements the lval_computed support for reading a
6516 $_siginfo value. */
6517
6518 static void
6519 siginfo_value_read (struct value *v)
6520 {
6521 LONGEST transferred;
6522
6523 validate_siginfo_access ();
6524
6525 transferred =
6526 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6527 NULL,
6528 value_contents_all_raw (v),
6529 value_offset (v),
6530 TYPE_LENGTH (value_type (v)));
6531
6532 if (transferred != TYPE_LENGTH (value_type (v)))
6533 error (_("Unable to read siginfo"));
6534 }
6535
6536 /* This function implements the lval_computed support for writing a
6537 $_siginfo value. */
6538
6539 static void
6540 siginfo_value_write (struct value *v, struct value *fromval)
6541 {
6542 LONGEST transferred;
6543
6544 validate_siginfo_access ();
6545
6546 transferred = target_write (&current_target,
6547 TARGET_OBJECT_SIGNAL_INFO,
6548 NULL,
6549 value_contents_all_raw (fromval),
6550 value_offset (v),
6551 TYPE_LENGTH (value_type (fromval)));
6552
6553 if (transferred != TYPE_LENGTH (value_type (fromval)))
6554 error (_("Unable to write siginfo"));
6555 }
6556
6557 static const struct lval_funcs siginfo_value_funcs =
6558 {
6559 siginfo_value_read,
6560 siginfo_value_write
6561 };
6562
6563 /* Return a new value with the correct type for the siginfo object of
6564 the current thread using architecture GDBARCH. Return a void value
6565 if there's no object available. */
6566
6567 static struct value *
6568 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6569 {
6570 if (target_has_stack
6571 && !ptid_equal (inferior_ptid, null_ptid)
6572 && gdbarch_get_siginfo_type_p (gdbarch))
6573 {
6574 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6575
6576 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6577 }
6578
6579 return allocate_value (builtin_type (gdbarch)->builtin_void);
6580 }
6581
6582 \f
6583 /* infcall_suspend_state contains state about the program itself like its
6584 registers and any signal it received when it last stopped.
6585 This state must be restored regardless of how the inferior function call
6586 ends (either successfully, or after it hits a breakpoint or signal)
6587 if the program is to properly continue where it left off. */
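/* A minimal sketch of how a caller typically uses this state
   (illustrative; see call_function_by_hand in infcall.c for the real
   code):

     struct infcall_suspend_state *st = save_infcall_suspend_state ();
     struct cleanup *back_to
       = make_cleanup_restore_infcall_suspend_state (st);
     ... set up and run the dummy-frame call ...
     do_cleanups (back_to);   -- restores registers, stop_pc and siginfo

   A caller that instead wants to keep the new inferior state can
   discard the cleanup and free the state with
   discard_infcall_suspend_state.  */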
6588
6589 struct infcall_suspend_state
6590 {
6591 struct thread_suspend_state thread_suspend;
6592 struct inferior_suspend_state inferior_suspend;
6593
6594 /* Other fields: */
6595 CORE_ADDR stop_pc;
6596 struct regcache *registers;
6597
6598 /* The gdbarch describing the format of SIGINFO_DATA, or NULL if it is not present. */
6599 struct gdbarch *siginfo_gdbarch;
6600
6601 /* The inferior format depends on SIGINFO_GDBARCH and has a length of
6602 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different gdbarch the
6603 content would be invalid. */
6604 gdb_byte *siginfo_data;
6605 };
6606
6607 struct infcall_suspend_state *
6608 save_infcall_suspend_state (void)
6609 {
6610 struct infcall_suspend_state *inf_state;
6611 struct thread_info *tp = inferior_thread ();
6612 struct inferior *inf = current_inferior ();
6613 struct regcache *regcache = get_current_regcache ();
6614 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6615 gdb_byte *siginfo_data = NULL;
6616
6617 if (gdbarch_get_siginfo_type_p (gdbarch))
6618 {
6619 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6620 size_t len = TYPE_LENGTH (type);
6621 struct cleanup *back_to;
6622
6623 siginfo_data = xmalloc (len);
6624 back_to = make_cleanup (xfree, siginfo_data);
6625
6626 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6627 siginfo_data, 0, len) == len)
6628 discard_cleanups (back_to);
6629 else
6630 {
6631 /* Errors ignored. */
6632 do_cleanups (back_to);
6633 siginfo_data = NULL;
6634 }
6635 }
6636
6637 inf_state = XZALLOC (struct infcall_suspend_state);
6638
6639 if (siginfo_data)
6640 {
6641 inf_state->siginfo_gdbarch = gdbarch;
6642 inf_state->siginfo_data = siginfo_data;
6643 }
6644
6645 inf_state->thread_suspend = tp->suspend;
6646 inf_state->inferior_suspend = inf->suspend;
6647
6648 /* run_inferior_call will not use the signal anyway, since it calls
6649 `proceed' with TARGET_SIGNAL_0. */
6650 tp->suspend.stop_signal = TARGET_SIGNAL_0;
6651
6652 inf_state->stop_pc = stop_pc;
6653
6654 inf_state->registers = regcache_dup (regcache);
6655
6656 return inf_state;
6657 }
6658
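/* Illustrative caller pattern (a sketch, not a verbatim excerpt from the
   infcall code): pairing the save with a cleanup guarantees the state is
   put back even if the inferior call throws, and do_cleanups both
   restores and frees the saved state.

     struct infcall_suspend_state *caller_state
       = save_infcall_suspend_state ();
     struct cleanup *chain
       = make_cleanup_restore_infcall_suspend_state (caller_state);
     ... run the inferior function call ...
     do_cleanups (chain);  */
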
6659 /* Restore inferior session state to INF_STATE. */
6660
6661 void
6662 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6663 {
6664 struct thread_info *tp = inferior_thread ();
6665 struct inferior *inf = current_inferior ();
6666 struct regcache *regcache = get_current_regcache ();
6667 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6668
6669 tp->suspend = inf_state->thread_suspend;
6670 inf->suspend = inf_state->inferior_suspend;
6671
6672 stop_pc = inf_state->stop_pc;
6673
6674 if (inf_state->siginfo_gdbarch == gdbarch)
6675 {
6676 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6677 size_t len = TYPE_LENGTH (type);
6678
6679 /* Errors ignored. */
6680 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6681 inf_state->siginfo_data, 0, len);
6682 }
6683
6684 /* The inferior can be gone if the user types "print exit(0)"
6685 (and perhaps other times). */
6686 if (target_has_execution)
6687 /* NB: The register write goes through to the target. */
6688 regcache_cpy (regcache, inf_state->registers);
6689
6690 discard_infcall_suspend_state (inf_state);
6691 }
6692
6693 static void
6694 do_restore_infcall_suspend_state_cleanup (void *state)
6695 {
6696 restore_infcall_suspend_state (state);
6697 }
6698
6699 struct cleanup *
6700 make_cleanup_restore_infcall_suspend_state
6701 (struct infcall_suspend_state *inf_state)
6702 {
6703 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6704 }
6705
6706 void
6707 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6708 {
6709 regcache_xfree (inf_state->registers);
6710 xfree (inf_state->siginfo_data);
6711 xfree (inf_state);
6712 }
6713
6714 struct regcache *
6715 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6716 {
6717 return inf_state->registers;
6718 }
6719
6720 /* infcall_control_state contains state regarding gdb's control of the
6721 inferior itself like stepping control. It also contains session state like
6722 the user's currently selected frame. */
6723
6724 struct infcall_control_state
6725 {
6726 struct thread_control_state thread_control;
6727 struct inferior_control_state inferior_control;
6728
6729 /* Other fields: */
6730 enum stop_stack_kind stop_stack_dummy;
6731 int stopped_by_random_signal;
6732 int stop_after_trap;
6733
6734 /* ID of the selected frame when the inferior function call was made. */
6735 struct frame_id selected_frame_id;
6736 };
6737
6738 /* Save all of the information associated with the inferior<==>gdb
6739 connection. */
6740
6741 struct infcall_control_state *
6742 save_infcall_control_state (void)
6743 {
6744 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6745 struct thread_info *tp = inferior_thread ();
6746 struct inferior *inf = current_inferior ();
6747
6748 inf_status->thread_control = tp->control;
6749 inf_status->inferior_control = inf->control;
6750
6751 tp->control.step_resume_breakpoint = NULL;
6752 tp->control.exception_resume_breakpoint = NULL;
6753
6754 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6755 chain. If caller's caller is walking the chain, they'll be happier if we
6756 hand them back the original chain when restore_infcall_control_state is
6757 called. */
6758 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6759
6760 /* Other fields: */
6761 inf_status->stop_stack_dummy = stop_stack_dummy;
6762 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6763 inf_status->stop_after_trap = stop_after_trap;
6764
6765 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6766
6767 return inf_status;
6768 }
6769
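/* Illustrative caller pattern, analogous to the suspend-state sketch
   above: restore_infcall_control_state frees INF_STATUS, so a caller
   that wants to keep the call's effects instead uses discard_cleanups
   together with discard_infcall_control_state.

     struct infcall_control_state *inf_status
       = save_infcall_control_state ();
     struct cleanup *chain
       = make_cleanup_restore_infcall_control_state (inf_status);
     ... run the inferior function call ...
     do_cleanups (chain);  */
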
6770 static int
6771 restore_selected_frame (void *args)
6772 {
6773 struct frame_id *fid = (struct frame_id *) args;
6774 struct frame_info *frame;
6775
6776 frame = frame_find_by_id (*fid);
6777
6778 /* If the frame recorded in inf_status->selected_frame_id can no longer
6779 be found, the previously selected frame is gone. */
6780 if (frame == NULL)
6781 {
6782 warning (_("Unable to restore previously selected frame."));
6783 return 0;
6784 }
6785
6786 select_frame (frame);
6787
6788 return (1);
6789 }
6790
6791 /* Restore inferior session state to INF_STATUS. */
6792
6793 void
6794 restore_infcall_control_state (struct infcall_control_state *inf_status)
6795 {
6796 struct thread_info *tp = inferior_thread ();
6797 struct inferior *inf = current_inferior ();
6798
6799 if (tp->control.step_resume_breakpoint)
6800 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6801
6802 if (tp->control.exception_resume_breakpoint)
6803 tp->control.exception_resume_breakpoint->disposition
6804 = disp_del_at_next_stop;
6805
6806 /* Handle the bpstat_copy of the chain. */
6807 bpstat_clear (&tp->control.stop_bpstat);
6808
6809 tp->control = inf_status->thread_control;
6810 inf->control = inf_status->inferior_control;
6811
6812 /* Other fields: */
6813 stop_stack_dummy = inf_status->stop_stack_dummy;
6814 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6815 stop_after_trap = inf_status->stop_after_trap;
6816
6817 if (target_has_stack)
6818 {
6819 /* The point of catch_errors is that if the stack is clobbered,
6820 walking the stack might encounter a garbage pointer and
6821 error() trying to dereference it. */
6822 if (catch_errors
6823 (restore_selected_frame, &inf_status->selected_frame_id,
6824 "Unable to restore previously selected frame:\n",
6825 RETURN_MASK_ERROR) == 0)
6826 /* Error in restoring the selected frame. Select the innermost
6827 frame. */
6828 select_frame (get_current_frame ());
6829 }
6830
6831 xfree (inf_status);
6832 }
6833
6834 static void
6835 do_restore_infcall_control_state_cleanup (void *sts)
6836 {
6837 restore_infcall_control_state (sts);
6838 }
6839
6840 struct cleanup *
6841 make_cleanup_restore_infcall_control_state
6842 (struct infcall_control_state *inf_status)
6843 {
6844 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
6845 }
6846
6847 void
6848 discard_infcall_control_state (struct infcall_control_state *inf_status)
6849 {
6850 if (inf_status->thread_control.step_resume_breakpoint)
6851 inf_status->thread_control.step_resume_breakpoint->disposition
6852 = disp_del_at_next_stop;
6853
6854 if (inf_status->thread_control.exception_resume_breakpoint)
6855 inf_status->thread_control.exception_resume_breakpoint->disposition
6856 = disp_del_at_next_stop;
6857
6858 /* See save_infcall_control_state for info on stop_bpstat. */
6859 bpstat_clear (&inf_status->thread_control.stop_bpstat);
6860
6861 xfree (inf_status);
6862 }
6863 \f
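/* Return non-zero if PTID matches FILTER.  FILTER can be the wildcard
   minus_one_ptid (every ptid matches), a process ptid such as one built
   with pid_to_ptid (every thread of that process matches), or a fully
   specified ptid (only that exact ptid matches).  Illustrative values:

     ptid_match (ptid_build (42, 7, 0), minus_one_ptid)    => 1
     ptid_match (ptid_build (42, 7, 0), pid_to_ptid (42))  => 1
     ptid_match (ptid_build (42, 7, 0), pid_to_ptid (13))  => 0  */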
6864 int
6865 ptid_match (ptid_t ptid, ptid_t filter)
6866 {
6867 if (ptid_equal (filter, minus_one_ptid))
6868 return 1;
6869 if (ptid_is_pid (filter)
6870 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6871 return 1;
6872 else if (ptid_equal (ptid, filter))
6873 return 1;
6874
6875 return 0;
6876 }
6877
6878 /* restore_inferior_ptid() will be used by the cleanup machinery
6879 to restore the inferior_ptid value saved in a call to
6880 save_inferior_ptid(). */
6881
6882 static void
6883 restore_inferior_ptid (void *arg)
6884 {
6885 ptid_t *saved_ptid_ptr = arg;
6886
6887 inferior_ptid = *saved_ptid_ptr;
6888 xfree (arg);
6889 }
6890
6891 /* Save the value of inferior_ptid so that it may be restored by a
6892 later call to do_cleanups(). Returns the struct cleanup pointer
6893 needed for later doing the cleanup. */
6894
6895 struct cleanup *
6896 save_inferior_ptid (void)
6897 {
6898 ptid_t *saved_ptid_ptr;
6899
6900 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6901 *saved_ptid_ptr = inferior_ptid;
6902 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6903 }
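
/* Illustrative use of the pair above (OTHER_PTID is a hypothetical
   placeholder): the cleanup puts inferior_ptid back even if an error is
   thrown in between.

     struct cleanup *old_chain = save_inferior_ptid ();
     inferior_ptid = other_ptid;
     ... operate on that thread ...
     do_cleanups (old_chain);  */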
6904 \f
6905
6906 /* User interface for reverse debugging:
6907 Set exec-direction / show exec-direction commands
6908 (the set command errors out unless the target can execute in reverse). */
6909
6910 int execution_direction = EXEC_FORWARD;
6911 static const char exec_forward[] = "forward";
6912 static const char exec_reverse[] = "reverse";
6913 static const char *exec_direction = exec_forward;
6914 static const char *const exec_direction_names[] = {
6915 exec_forward,
6916 exec_reverse,
6917 NULL
6918 };
6919
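/* Illustrative session (requires a target that can execute in reverse,
   e.g. the process record target):

     (gdb) set exec-direction reverse
     (gdb) show exec-direction
     Reverse.  */
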
6920 static void
6921 set_exec_direction_func (char *args, int from_tty,
6922 struct cmd_list_element *cmd)
6923 {
6924 if (target_can_execute_reverse)
6925 {
6926 if (!strcmp (exec_direction, exec_forward))
6927 execution_direction = EXEC_FORWARD;
6928 else if (!strcmp (exec_direction, exec_reverse))
6929 execution_direction = EXEC_REVERSE;
6930 }
6931 else
6932 {
6933 exec_direction = exec_forward;
6934 error (_("Target does not support this operation."));
6935 }
6936 }
6937
6938 static void
6939 show_exec_direction_func (struct ui_file *out, int from_tty,
6940 struct cmd_list_element *cmd, const char *value)
6941 {
6942 switch (execution_direction) {
6943 case EXEC_FORWARD:
6944 fprintf_filtered (out, _("Forward.\n"));
6945 break;
6946 case EXEC_REVERSE:
6947 fprintf_filtered (out, _("Reverse.\n"));
6948 break;
6949 default:
6950 internal_error (__FILE__, __LINE__,
6951 _("bogus execution_direction value: %d"),
6952 (int) execution_direction);
6953 }
6954 }
6955
6956 /* User interface for non-stop mode. */
6957
6958 int non_stop = 0;
6959
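/* Illustrative session; as set_non_stop below enforces, the mode must be
   chosen before the program is running:

     (gdb) set non-stop on
     (gdb) show non-stop
     Controlling the inferior in non-stop mode is on.  */
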
6960 static void
6961 set_non_stop (char *args, int from_tty,
6962 struct cmd_list_element *c)
6963 {
6964 if (target_has_execution)
6965 {
6966 non_stop_1 = non_stop;
6967 error (_("Cannot change this setting while the inferior is running."));
6968 }
6969
6970 non_stop = non_stop_1;
6971 }
6972
6973 static void
6974 show_non_stop (struct ui_file *file, int from_tty,
6975 struct cmd_list_element *c, const char *value)
6976 {
6977 fprintf_filtered (file,
6978 _("Controlling the inferior in non-stop mode is %s.\n"),
6979 value);
6980 }
6981
6982 static void
6983 show_schedule_multiple (struct ui_file *file, int from_tty,
6984 struct cmd_list_element *c, const char *value)
6985 {
6986 fprintf_filtered (file, _("Resuming the execution of threads "
6987 "of all processes is %s.\n"), value);
6988 }
6989
6990 void
6991 _initialize_infrun (void)
6992 {
6993 int i;
6994 int numsigs;
6995
6996 add_info ("signals", signals_info, _("\
6997 What debugger does when program gets various signals.\n\
6998 Specify a signal as argument to print info on that signal only."));
6999 add_info_alias ("handle", "signals", 0);
7000
7001 add_com ("handle", class_run, handle_command, _("\
7002 Specify how to handle a signal.\n\
7003 Args are signals and actions to apply to those signals.\n\
7004 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7005 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7006 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7007 The special arg \"all\" is recognized to mean all signals except those\n\
7008 used by the debugger, typically SIGTRAP and SIGINT.\n\
7009 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
7010 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
7011 Stop means reenter debugger if this signal happens (implies print).\n\
7012 Print means print a message if this signal happens.\n\
7013 Pass means let program see this signal; otherwise program doesn't know.\n\
7014 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7015 Pass and Stop may be combined."));
7016 if (xdb_commands)
7017 {
7018 add_com ("lz", class_info, signals_info, _("\
7019 What debugger does when program gets various signals.\n\
7020 Specify a signal as argument to print info on that signal only."));
7021 add_com ("z", class_run, xdb_handle_command, _("\
7022 Specify how to handle a signal.\n\
7023 Args are signals and actions to apply to those signals.\n\
7024 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7025 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7026 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7027 The special arg \"all\" is recognized to mean all signals except those\n\
7028 used by the debugger, typically SIGTRAP and SIGINT.\n\
7029 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7030 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7031 nopass), \"Q\" (noprint)\n\
7032 Stop means reenter debugger if this signal happens (implies print).\n\
7033 Print means print a message if this signal happens.\n\
7034 Pass means let program see this signal; otherwise program doesn't know.\n\
7035 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7036 Pass and Stop may be combined."));
7037 }
7038
7039 if (!dbx_commands)
7040 stop_command = add_cmd ("stop", class_obscure,
7041 not_just_help_class_command, _("\
7042 There is no `stop' command, but you can set a hook on `stop'.\n\
7043 This allows you to set a list of commands to be run each time execution\n\
7044 of the program stops."), &cmdlist);
7045
7046 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7047 Set inferior debugging."), _("\
7048 Show inferior debugging."), _("\
7049 When non-zero, inferior specific debugging is enabled."),
7050 NULL,
7051 show_debug_infrun,
7052 &setdebuglist, &showdebuglist);
7053
7054 add_setshow_boolean_cmd ("displaced", class_maintenance,
7055 &debug_displaced, _("\
7056 Set displaced stepping debugging."), _("\
7057 Show displaced stepping debugging."), _("\
7058 When non-zero, displaced stepping specific debugging is enabled."),
7059 NULL,
7060 show_debug_displaced,
7061 &setdebuglist, &showdebuglist);
7062
7063 add_setshow_boolean_cmd ("non-stop", no_class,
7064 &non_stop_1, _("\
7065 Set whether gdb controls the inferior in non-stop mode."), _("\
7066 Show whether gdb controls the inferior in non-stop mode."), _("\
7067 When debugging a multi-threaded program and this setting is\n\
7068 off (the default, also called all-stop mode), when one thread stops\n\
7069 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7070 all other threads in the program while you interact with the thread of\n\
7071 interest. When you continue or step a thread, you can allow the other\n\
7072 threads to run, or have them remain stopped, but while you inspect any\n\
7073 thread's state, all threads stop.\n\
7074 \n\
7075 In non-stop mode, when one thread stops, other threads can continue\n\
7076 to run freely. You'll be able to step each thread independently,\n\
7077 leave it stopped or free to run as needed."),
7078 set_non_stop,
7079 show_non_stop,
7080 &setlist,
7081 &showlist);
7082
7083 numsigs = (int) TARGET_SIGNAL_LAST;
7084 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7085 signal_print = (unsigned char *)
7086 xmalloc (sizeof (signal_print[0]) * numsigs);
7087 signal_program = (unsigned char *)
7088 xmalloc (sizeof (signal_program[0]) * numsigs);
7089 signal_pass = (unsigned char *)
7090 xmalloc (sizeof (signal_pass[0]) * numsigs);
7091 for (i = 0; i < numsigs; i++)
7092 {
7093 signal_stop[i] = 1;
7094 signal_print[i] = 1;
7095 signal_program[i] = 1;
7096 }
7097
7098 /* Signals caused by debugger's own actions
7099 should not be given to the program afterwards. */
7100 signal_program[TARGET_SIGNAL_TRAP] = 0;
7101 signal_program[TARGET_SIGNAL_INT] = 0;
7102
7103 /* Signals that are not errors should not normally enter the debugger. */
7104 signal_stop[TARGET_SIGNAL_ALRM] = 0;
7105 signal_print[TARGET_SIGNAL_ALRM] = 0;
7106 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
7107 signal_print[TARGET_SIGNAL_VTALRM] = 0;
7108 signal_stop[TARGET_SIGNAL_PROF] = 0;
7109 signal_print[TARGET_SIGNAL_PROF] = 0;
7110 signal_stop[TARGET_SIGNAL_CHLD] = 0;
7111 signal_print[TARGET_SIGNAL_CHLD] = 0;
7112 signal_stop[TARGET_SIGNAL_IO] = 0;
7113 signal_print[TARGET_SIGNAL_IO] = 0;
7114 signal_stop[TARGET_SIGNAL_POLL] = 0;
7115 signal_print[TARGET_SIGNAL_POLL] = 0;
7116 signal_stop[TARGET_SIGNAL_URG] = 0;
7117 signal_print[TARGET_SIGNAL_URG] = 0;
7118 signal_stop[TARGET_SIGNAL_WINCH] = 0;
7119 signal_print[TARGET_SIGNAL_WINCH] = 0;
7120 signal_stop[TARGET_SIGNAL_PRIO] = 0;
7121 signal_print[TARGET_SIGNAL_PRIO] = 0;
7122
7123 /* These signals are used internally by user-level thread
7124 implementations. (See signal(5) on Solaris.) Like the above
7125 signals, a healthy program receives and handles them as part of
7126 its normal operation. */
7127 signal_stop[TARGET_SIGNAL_LWP] = 0;
7128 signal_print[TARGET_SIGNAL_LWP] = 0;
7129 signal_stop[TARGET_SIGNAL_WAITING] = 0;
7130 signal_print[TARGET_SIGNAL_WAITING] = 0;
7131 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
7132 signal_print[TARGET_SIGNAL_CANCEL] = 0;
7133
7134 /* Update cached state. */
7135 signal_cache_update (-1);
7136
7137 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7138 &stop_on_solib_events, _("\
7139 Set stopping for shared library events."), _("\
7140 Show stopping for shared library events."), _("\
7141 If nonzero, gdb will give control to the user when the dynamic linker\n\
7142 notifies gdb of shared library events. The most common event of interest\n\
7143 to the user would be loading/unloading of a new library."),
7144 NULL,
7145 show_stop_on_solib_events,
7146 &setlist, &showlist);
7147
7148 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7149 follow_fork_mode_kind_names,
7150 &follow_fork_mode_string, _("\
7151 Set debugger response to a program call of fork or vfork."), _("\
7152 Show debugger response to a program call of fork or vfork."), _("\
7153 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7154 parent - the original process is debugged after a fork\n\
7155 child - the new process is debugged after a fork\n\
7156 The unfollowed process will continue to run.\n\
7157 By default, the debugger will follow the parent process."),
7158 NULL,
7159 show_follow_fork_mode_string,
7160 &setlist, &showlist);
7161
7162 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7163 follow_exec_mode_names,
7164 &follow_exec_mode_string, _("\
7165 Set debugger response to a program call of exec."), _("\
7166 Show debugger response to a program call of exec."), _("\
7167 An exec call replaces the program image of a process.\n\
7168 \n\
7169 follow-exec-mode can be:\n\
7170 \n\
7171 new - the debugger creates a new inferior and rebinds the process\n\
7172 to this new inferior. The program the process was running before\n\
7173 the exec call can be restarted afterwards by restarting the original\n\
7174 inferior.\n\
7175 \n\
7176 same - the debugger keeps the process bound to the same inferior.\n\
7177 The new executable image replaces the previous executable loaded in\n\
7178 the inferior. Restarting the inferior after the exec call restarts\n\
7179 the executable the process was running after the exec call.\n\
7180 \n\
7181 By default, the debugger will use the same inferior."),
7182 NULL,
7183 show_follow_exec_mode_string,
7184 &setlist, &showlist);
7185
7186 add_setshow_enum_cmd ("scheduler-locking", class_run,
7187 scheduler_enums, &scheduler_mode, _("\
7188 Set mode for locking scheduler during execution."), _("\
7189 Show mode for locking scheduler during execution."), _("\
7190 off == no locking (threads may preempt at any time)\n\
7191 on == full locking (no thread except the current thread may run)\n\
7192 step == scheduler locked during every single-step operation.\n\
7193 In this mode, no other thread may run during a step command.\n\
7194 Other threads may run while stepping over a function call ('next')."),
7195 set_schedlock_func, /* traps on target vector */
7196 show_scheduler_mode,
7197 &setlist, &showlist);
7198
7199 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7200 Set mode for resuming threads of all processes."), _("\
7201 Show mode for resuming threads of all processes."), _("\
7202 When on, execution commands (such as 'continue' or 'next') resume all\n\
7203 threads of all processes. When off (which is the default), execution\n\
7204 commands only resume the threads of the current process. The set of\n\
7205 threads that are resumed is further refined by the scheduler-locking\n\
7206 mode (see help set scheduler-locking)."),
7207 NULL,
7208 show_schedule_multiple,
7209 &setlist, &showlist);
7210
7211 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7212 Set mode of the step operation."), _("\
7213 Show mode of the step operation."), _("\
7214 When set, doing a step over a function without debug line information\n\
7215 will stop at the first instruction of that function. Otherwise, the\n\
7216 function is skipped and the step command stops at a different source line."),
7217 NULL,
7218 show_step_stop_if_no_debug,
7219 &setlist, &showlist);
7220
7221 add_setshow_enum_cmd ("displaced-stepping", class_run,
7222 can_use_displaced_stepping_enum,
7223 &can_use_displaced_stepping, _("\
7224 Set debugger's willingness to use displaced stepping."), _("\
7225 Show debugger's willingness to use displaced stepping."), _("\
7226 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7227 supported by the target architecture. If off, gdb will not use displaced\n\
7228 stepping to step over breakpoints, even if such is supported by the target\n\
7229 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7230 if the target architecture supports it and non-stop mode is active, but will not\n\
7231 use it in all-stop mode (see help set non-stop)."),
7232 NULL,
7233 show_can_use_displaced_stepping,
7234 &setlist, &showlist);
7235
7236 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7237 &exec_direction, _("Set direction of execution.\n\
7238 Options are 'forward' or 'reverse'."),
7239 _("Show direction of execution (forward/reverse)."),
7240 _("Tells gdb whether to execute forward or backward."),
7241 set_exec_direction_func, show_exec_direction_func,
7242 &setlist, &showlist);
7243
7244 /* Set/show detach-on-fork: user-settable mode. */
7245
7246 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7247 Set whether gdb will detach the child of a fork."), _("\
7248 Show whether gdb will detach the child of a fork."), _("\
7249 Tells gdb whether to detach the child of a fork."),
7250 NULL, NULL, &setlist, &showlist);
7251
7252 /* Set/show disable address space randomization mode. */
7253
7254 add_setshow_boolean_cmd ("disable-randomization", class_support,
7255 &disable_randomization, _("\
7256 Set disabling of debuggee's virtual address space randomization."), _("\
7257 Show disabling of debuggee's virtual address space randomization."), _("\
7258 When this mode is on (which is the default), randomization of the virtual\n\
7259 address space is disabled. Standalone programs run with the randomization\n\
7260 enabled by default on some platforms."),
7261 &set_disable_randomization,
7262 &show_disable_randomization,
7263 &setlist, &showlist);
7264
7265 /* ptid initializations */
7266 inferior_ptid = null_ptid;
7267 target_last_wait_ptid = minus_one_ptid;
7268
7269 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7270 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7271 observer_attach_thread_exit (infrun_thread_thread_exit);
7272 observer_attach_inferior_exit (infrun_inferior_exit);
7273
7274 /* Explicitly create without lookup, since that tries to create a
7275 value with a void typed value, and when we get here, gdbarch
7276 isn't initialized yet. At this point, we're quite sure there
7277 isn't another convenience variable of the same name. */
7278 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
7279
7280 add_setshow_boolean_cmd ("observer", no_class,
7281 &observer_mode_1, _("\
7282 Set whether gdb controls the inferior in observer mode."), _("\
7283 Show whether gdb controls the inferior in observer mode."), _("\
7284 In observer mode, GDB can get data from the inferior, but not\n\
7285 affect its execution. Registers and memory may not be changed,\n\
7286 breakpoints may not be set, and the program cannot be interrupted\n\
7287 or signalled."),
7288 set_observer_mode,
7289 show_observer_mode,
7290 &setlist,
7291 &showlist);
7292 }