binutils-gdb.git / gdb / infrun.c (commit e0df5716476adce2769fc9a2f1a03f3cb74409e2)
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "dictionary.h"
49 #include "block.h"
50 #include "gdb_assert.h"
51 #include "mi/mi-common.h"
52 #include "event-top.h"
53 #include "record.h"
54 #include "inline-frame.h"
55 #include "jit.h"
56 #include "tracepoint.h"
57 #include "continuations.h"
58
59 /* Prototypes for local functions */
60
61 static void signals_info (char *, int);
62
63 static void handle_command (char *, int);
64
65 static void sig_print_info (enum target_signal);
66
67 static void sig_print_header (void);
68
69 static void resume_cleanups (void *);
70
71 static int hook_stop_stub (void *);
72
73 static int restore_selected_frame (void *);
74
75 static int follow_fork (void);
76
77 static void set_schedlock_func (char *args, int from_tty,
78 struct cmd_list_element *c);
79
80 static int currently_stepping (struct thread_info *tp);
81
82 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
83 void *data);
84
85 static void xdb_handle_command (char *args, int from_tty);
86
87 static int prepare_to_proceed (int);
88
89 static void print_exited_reason (int exitstatus);
90
91 static void print_signal_exited_reason (enum target_signal siggnal);
92
93 static void print_no_history_reason (void);
94
95 static void print_signal_received_reason (enum target_signal siggnal);
96
97 static void print_end_stepping_range_reason (void);
98
99 void _initialize_infrun (void);
100
101 void nullify_last_target_wait_ptid (void);
102
103 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
104
105 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
106
107 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
108
109 /* When set, stop the 'step' command if we enter a function which has
110 no line number information. The normal behavior is that we step
111 over such functions. */
112 int step_stop_if_no_debug = 0;
113 static void
114 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
115 struct cmd_list_element *c, const char *value)
116 {
117 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
118 }
119
120 /* In asynchronous mode, but simulating synchronous execution. */
121
122 int sync_execution = 0;
123
124 /* wait_for_inferior and normal_stop use this to notify the user
125 when the inferior stopped in a different thread than it had been
126 running in. */
127
128 static ptid_t previous_inferior_ptid;
129
130 /* Default behavior is to detach from newly forked processes (legacy). */
131 int detach_fork = 1;
132
133 int debug_displaced = 0;
134 static void
135 show_debug_displaced (struct ui_file *file, int from_tty,
136 struct cmd_list_element *c, const char *value)
137 {
138 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
139 }
140
141 int debug_infrun = 0;
142 static void
143 show_debug_infrun (struct ui_file *file, int from_tty,
144 struct cmd_list_element *c, const char *value)
145 {
146 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
147 }
148
149 /* If the program uses ELF-style shared libraries, then calls to
150 functions in shared libraries go through stubs, which live in a
151 table called the PLT (Procedure Linkage Table). The first time the
152 function is called, the stub sends control to the dynamic linker,
153 which looks up the function's real address, patches the stub so
154 that future calls will go directly to the function, and then passes
155 control to the function.
156
157 If we are stepping at the source level, we don't want to see any of
158 this --- we just want to skip over the stub and the dynamic linker.
159 The simple approach is to single-step until control leaves the
160 dynamic linker.
161
162 However, on some systems (e.g., Red Hat's 5.2 distribution) the
163 dynamic linker calls functions in the shared C library, so you
164 can't tell from the PC alone whether the dynamic linker is still
165 running. In this case, we use a step-resume breakpoint to get us
166 past the dynamic linker, as if we were using "next" to step over a
167 function call.
168
169 in_solib_dynsym_resolve_code() says whether we're in the dynamic
170 linker code or not. Normally, this means we single-step. However,
171 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
172 address where we can place a step-resume breakpoint to get past the
173 linker's symbol resolution function.
174
175 in_solib_dynsym_resolve_code() can generally be implemented in a
176 pretty portable way, by comparing the PC against the address ranges
177 of the dynamic linker's sections.
178
179 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
180 it depends on internal details of the dynamic linker. It's usually
181 not too hard to figure out where to put a breakpoint, but it
182 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
183 sanity checking. If it can't figure things out, returning zero and
184 getting the (possibly confusing) stepping behavior is better than
185 signalling an error, which will obscure the change in the
186 inferior's state. */
187
188 /* This function returns TRUE if pc is the address of an instruction
189 that lies within the dynamic linker (such as the event hook, or the
190 dld itself).
191
192 This function must be used only when a dynamic linker event has
193 been caught, and the inferior is being stepped out of the hook, or
194 undefined results are guaranteed. */
195
196 #ifndef SOLIB_IN_DYNAMIC_LINKER
197 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
198 #endif
199
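/* As an illustration of the "pretty portable way" mentioned above, the
   check boils down to comparing the PC against a table of address
   ranges covering the dynamic linker's sections.  The sketch below is
   hypothetical (the pc_range type, the helper name and how the table
   would be filled in are assumptions, not GDB's actual
   in_solib_dynsym_resolve_code implementation):

   struct pc_range { CORE_ADDR start, end; };

   static int
   pc_in_dynsym_resolve_code_sketch (CORE_ADDR pc,
                                     const struct pc_range *ranges,
                                     int nranges)
   {
     int i;

     for (i = 0; i < nranges; i++)
       if (pc >= ranges[i].start && pc < ranges[i].end)
         return 1;
     return 0;
   }
*/
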
200 /* "Observer mode" is somewhat like a more extreme version of
201 non-stop, in which all GDB operations that might affect the
202 target's execution have been disabled. */
203
204 static int non_stop_1 = 0;
205
206 int observer_mode = 0;
207 static int observer_mode_1 = 0;
208
209 static void
210 set_observer_mode (char *args, int from_tty,
211 struct cmd_list_element *c)
212 {
213 extern int pagination_enabled;
214
215 if (target_has_execution)
216 {
217 observer_mode_1 = observer_mode;
218 error (_("Cannot change this setting while the inferior is running."));
219 }
220
221 observer_mode = observer_mode_1;
222
223 may_write_registers = !observer_mode;
224 may_write_memory = !observer_mode;
225 may_insert_breakpoints = !observer_mode;
226 may_insert_tracepoints = !observer_mode;
227 /* We can insert fast tracepoints in or out of observer mode,
228 but enable them if we're going into this mode. */
229 if (observer_mode)
230 may_insert_fast_tracepoints = 1;
231 may_stop = !observer_mode;
232 update_target_permissions ();
233
234 /* Going *into* observer mode we must force non-stop, then
235 going out we leave it that way. */
236 if (observer_mode)
237 {
238 target_async_permitted = 1;
239 pagination_enabled = 0;
240 non_stop = non_stop_1 = 1;
241 }
242
243 if (from_tty)
244 printf_filtered (_("Observer mode is now %s.\n"),
245 (observer_mode ? "on" : "off"));
246 }
247
248 static void
249 show_observer_mode (struct ui_file *file, int from_tty,
250 struct cmd_list_element *c, const char *value)
251 {
252 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
253 }
254
255 /* This updates the value of observer mode based on changes in
256 permissions. Note that we are deliberately ignoring the values of
257 may-write-registers and may-write-memory, since the user may have
258 reason to enable these during a session, for instance to turn on a
259 debugging-related global. */
260
261 void
262 update_observer_mode (void)
263 {
264 int newval;
265
266 newval = (!may_insert_breakpoints
267 && !may_insert_tracepoints
268 && may_insert_fast_tracepoints
269 && !may_stop
270 && non_stop);
271
272 /* Let the user know if things change. */
273 if (newval != observer_mode)
274 printf_filtered (_("Observer mode is now %s.\n"),
275 (newval ? "on" : "off"));
276
277 observer_mode = observer_mode_1 = newval;
278 }
279
280 /* Tables of how to react to signals; the user sets them. */
281
282 static unsigned char *signal_stop;
283 static unsigned char *signal_print;
284 static unsigned char *signal_program;
285
286 /* Table of signals that the target may silently handle.
287 This is automatically determined from the flags above,
288 and simply cached here. */
289 static unsigned char *signal_pass;
290
291 #define SET_SIGS(nsigs,sigs,flags) \
292 do { \
293 int signum = (nsigs); \
294 while (signum-- > 0) \
295 if ((sigs)[signum]) \
296 (flags)[signum] = 1; \
297 } while (0)
298
299 #define UNSET_SIGS(nsigs,sigs,flags) \
300 do { \
301 int signum = (nsigs); \
302 while (signum-- > 0) \
303 if ((sigs)[signum]) \
304 (flags)[signum] = 0; \
305 } while (0)
306
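/* For illustration, this is roughly how the SET_SIGS/UNSET_SIGS helpers
   are meant to be used: build a scratch table flagging the signals a
   "handle" command selected, then apply it to the tables above.  The
   sketch is hypothetical (TARGET_SIGNAL_USR1 and the allocation details
   are assumptions for the example, not code from this file).  For, say,
   "handle SIGUSR1 nostop noprint pass" it would amount to:

   int nsigs = (int) TARGET_SIGNAL_LAST;
   unsigned char *sigs = xcalloc (nsigs, sizeof (*sigs));

   sigs[TARGET_SIGNAL_USR1] = 1;
   UNSET_SIGS (nsigs, sigs, signal_stop);
   UNSET_SIGS (nsigs, sigs, signal_print);
   SET_SIGS (nsigs, sigs, signal_program);

   xfree (sigs);
*/
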
307 /* Value to pass to target_resume() to cause all threads to resume. */
308
309 #define RESUME_ALL minus_one_ptid
310
311 /* Command list pointer for the "stop" placeholder. */
312
313 static struct cmd_list_element *stop_command;
314
315 /* Function inferior was in as of last step command. */
316
317 static struct symbol *step_start_function;
318
319 /* Nonzero if we want to give control to the user when we're notified
320 of shared library events by the dynamic linker. */
321 int stop_on_solib_events;
322 static void
323 show_stop_on_solib_events (struct ui_file *file, int from_tty,
324 struct cmd_list_element *c, const char *value)
325 {
326 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
327 value);
328 }
329
330 /* Nonzero means we are expecting a trace trap
331 and should stop the inferior and return silently when it happens. */
332
333 int stop_after_trap;
334
335 /* Save register contents here when executing a "finish" command or when
336 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
337 Thus this contains the return value from the called function (assuming
338 values are returned in a register). */
339
340 struct regcache *stop_registers;
341
342 /* Nonzero after stop if current stack frame should be printed. */
343
344 static int stop_print_frame;
345
346 /* This is a cached copy of the pid/waitstatus of the last event
347 returned by target_wait()/deprecated_target_wait_hook(). This
348 information is returned by get_last_target_status(). */
349 static ptid_t target_last_wait_ptid;
350 static struct target_waitstatus target_last_waitstatus;
351
352 static void context_switch (ptid_t ptid);
353
354 void init_thread_stepping_state (struct thread_info *tss);
355
356 void init_infwait_state (void);
357
358 static const char follow_fork_mode_child[] = "child";
359 static const char follow_fork_mode_parent[] = "parent";
360
361 static const char *follow_fork_mode_kind_names[] = {
362 follow_fork_mode_child,
363 follow_fork_mode_parent,
364 NULL
365 };
366
367 static const char *follow_fork_mode_string = follow_fork_mode_parent;
368 static void
369 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
370 struct cmd_list_element *c, const char *value)
371 {
372 fprintf_filtered (file,
373 _("Debugger response to a program "
374 "call of fork or vfork is \"%s\".\n"),
375 value);
376 }
377 \f
378
379 /* Tell the target to follow the fork we're stopped at. Returns true
380 if the inferior should be resumed; false, if the target for some
381 reason decided it's best not to resume. */
382
383 static int
384 follow_fork (void)
385 {
386 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
387 int should_resume = 1;
388 struct thread_info *tp;
389
390 /* Copy user stepping state to the new inferior thread. FIXME: the
391 followed fork child thread should have a copy of most of the
392 parent thread structure's run control related fields, not just these.
393 Initialized to avoid "may be used uninitialized" warnings from gcc. */
394 struct breakpoint *step_resume_breakpoint = NULL;
395 struct breakpoint *exception_resume_breakpoint = NULL;
396 CORE_ADDR step_range_start = 0;
397 CORE_ADDR step_range_end = 0;
398 struct frame_id step_frame_id = { 0 };
399
400 if (!non_stop)
401 {
402 ptid_t wait_ptid;
403 struct target_waitstatus wait_status;
404
405 /* Get the last target status returned by target_wait(). */
406 get_last_target_status (&wait_ptid, &wait_status);
407
408 /* If not stopped at a fork event, then there's nothing else to
409 do. */
410 if (wait_status.kind != TARGET_WAITKIND_FORKED
411 && wait_status.kind != TARGET_WAITKIND_VFORKED)
412 return 1;
413
414 /* Check if we have switched over from WAIT_PTID since the event was
415 reported. */
416 if (!ptid_equal (wait_ptid, minus_one_ptid)
417 && !ptid_equal (inferior_ptid, wait_ptid))
418 {
419 /* We did. Switch back to WAIT_PTID thread, to tell the
420 target to follow it (in either direction). We'll
421 afterwards refuse to resume, and inform the user what
422 happened. */
423 switch_to_thread (wait_ptid);
424 should_resume = 0;
425 }
426 }
427
428 tp = inferior_thread ();
429
430 /* If there were any forks/vforks that were caught and are now to be
431 followed, then do so now. */
432 switch (tp->pending_follow.kind)
433 {
434 case TARGET_WAITKIND_FORKED:
435 case TARGET_WAITKIND_VFORKED:
436 {
437 ptid_t parent, child;
438
439 /* If the user did a next/step, etc, over a fork call,
440 preserve the stepping state in the fork child. */
441 if (follow_child && should_resume)
442 {
443 step_resume_breakpoint = clone_momentary_breakpoint
444 (tp->control.step_resume_breakpoint);
445 step_range_start = tp->control.step_range_start;
446 step_range_end = tp->control.step_range_end;
447 step_frame_id = tp->control.step_frame_id;
448 exception_resume_breakpoint
449 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
450
451 /* For now, delete the parent's sr breakpoint; otherwise,
452 parent/child sr breakpoints are considered duplicates,
453 and the child version will not be installed. Remove
454 this when the breakpoints module becomes aware of
455 inferiors and address spaces. */
456 delete_step_resume_breakpoint (tp);
457 tp->control.step_range_start = 0;
458 tp->control.step_range_end = 0;
459 tp->control.step_frame_id = null_frame_id;
460 delete_exception_resume_breakpoint (tp);
461 }
462
463 parent = inferior_ptid;
464 child = tp->pending_follow.value.related_pid;
465
466 /* Tell the target to do whatever is necessary to follow
467 either parent or child. */
468 if (target_follow_fork (follow_child))
469 {
470 /* Target refused to follow, or there's some other reason
471 we shouldn't resume. */
472 should_resume = 0;
473 }
474 else
475 {
476 /* This pending follow fork event is now handled, one way
477 or another. The previously selected thread may be gone
478 from the lists by now, but if it is still around, we need
479 to clear the pending follow request. */
480 tp = find_thread_ptid (parent);
481 if (tp)
482 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
483
484 /* This makes sure we don't try to apply the "Switched
485 over from WAIT_PTID" logic above. */
486 nullify_last_target_wait_ptid ();
487
488 /* If we followed the child, switch to it... */
489 if (follow_child)
490 {
491 switch_to_thread (child);
492
493 /* ... and preserve the stepping state, in case the
494 user was stepping over the fork call. */
495 if (should_resume)
496 {
497 tp = inferior_thread ();
498 tp->control.step_resume_breakpoint
499 = step_resume_breakpoint;
500 tp->control.step_range_start = step_range_start;
501 tp->control.step_range_end = step_range_end;
502 tp->control.step_frame_id = step_frame_id;
503 tp->control.exception_resume_breakpoint
504 = exception_resume_breakpoint;
505 }
506 else
507 {
508 /* If we get here, it's because we're trying to
509 resume from a fork catchpoint, but the user
510 has switched threads away from the thread that
511 forked. In that case, the resume command
512 issued is most likely not applicable to the
513 child, so just warn, and refuse to resume. */
514 warning (_("Not resuming: switched threads "
515 "before following fork child.\n"));
516 }
517
518 /* Reset breakpoints in the child as appropriate. */
519 follow_inferior_reset_breakpoints ();
520 }
521 else
522 switch_to_thread (parent);
523 }
524 }
525 break;
526 case TARGET_WAITKIND_SPURIOUS:
527 /* Nothing to follow. */
528 break;
529 default:
530 internal_error (__FILE__, __LINE__,
531 "Unexpected pending_follow.kind %d\n",
532 tp->pending_follow.kind);
533 break;
534 }
535
536 return should_resume;
537 }
538
539 void
540 follow_inferior_reset_breakpoints (void)
541 {
542 struct thread_info *tp = inferior_thread ();
543
544 /* Was there a step_resume breakpoint? (There was if the user
545 did a "next" at the fork() call.) If so, explicitly reset its
546 thread number.
547
548 step_resumes are a form of bp that are made to be per-thread.
549 Since we created the step_resume bp when the parent process
550 was being debugged, and now are switching to the child process,
551 from the breakpoint package's viewpoint, that's a switch of
552 "threads". We must update the bp's notion of which thread
553 it is for, or it'll be ignored when it triggers. */
554
555 if (tp->control.step_resume_breakpoint)
556 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
557
558 if (tp->control.exception_resume_breakpoint)
559 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
560
561 /* Reinsert all breakpoints in the child. The user may have set
562 breakpoints after catching the fork, in which case those
563 were never set in the child, but only in the parent. This makes
564 sure the inserted breakpoints match the breakpoint list. */
565
566 breakpoint_re_set ();
567 insert_breakpoints ();
568 }
569
570 /* The child has exited or execed: resume threads of the parent the
571 user wanted to be executing. */
572
573 static int
574 proceed_after_vfork_done (struct thread_info *thread,
575 void *arg)
576 {
577 int pid = * (int *) arg;
578
579 if (ptid_get_pid (thread->ptid) == pid
580 && is_running (thread->ptid)
581 && !is_executing (thread->ptid)
582 && !thread->stop_requested
583 && thread->suspend.stop_signal == TARGET_SIGNAL_0)
584 {
585 if (debug_infrun)
586 fprintf_unfiltered (gdb_stdlog,
587 "infrun: resuming vfork parent thread %s\n",
588 target_pid_to_str (thread->ptid));
589
590 switch_to_thread (thread->ptid);
591 clear_proceed_status ();
592 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
593 }
594
595 return 0;
596 }
597
598 /* Called whenever we notice an exec or exit event, to handle
599 detaching or resuming a vfork parent. */
600
601 static void
602 handle_vfork_child_exec_or_exit (int exec)
603 {
604 struct inferior *inf = current_inferior ();
605
606 if (inf->vfork_parent)
607 {
608 int resume_parent = -1;
609
610 /* This exec or exit marks the end of the shared memory region
611 between the parent and the child. If the user wanted to
612 detach from the parent, now is the time. */
613
614 if (inf->vfork_parent->pending_detach)
615 {
616 struct thread_info *tp;
617 struct cleanup *old_chain;
618 struct program_space *pspace;
619 struct address_space *aspace;
620
621 /* follow-fork child, detach-on-fork on. */
622
623 old_chain = make_cleanup_restore_current_thread ();
624
625 /* We're letting go of the parent. */
626 tp = any_live_thread_of_process (inf->vfork_parent->pid);
627 switch_to_thread (tp->ptid);
628
629 /* We're about to detach from the parent, which implicitly
630 removes breakpoints from its address space. There's a
631 catch here: we want to reuse the spaces for the child,
632 but parent and child are still sharing the pspace at this
633 point, although the exec in reality makes the kernel give
634 the child a fresh set of new pages. The problem here is
635 that the breakpoints module, being unaware of this, would
636 likely choose the child process to write to the parent
637 address space. Swapping the child temporarily away from
638 the spaces has the desired effect. Yes, this is "sort
639 of" a hack. */
640
641 pspace = inf->pspace;
642 aspace = inf->aspace;
643 inf->aspace = NULL;
644 inf->pspace = NULL;
645
646 if (debug_infrun || info_verbose)
647 {
648 target_terminal_ours ();
649
650 if (exec)
651 fprintf_filtered (gdb_stdlog,
652 "Detaching vfork parent process "
653 "%d after child exec.\n",
654 inf->vfork_parent->pid);
655 else
656 fprintf_filtered (gdb_stdlog,
657 "Detaching vfork parent process "
658 "%d after child exit.\n",
659 inf->vfork_parent->pid);
660 }
661
662 target_detach (NULL, 0);
663
664 /* Put it back. */
665 inf->pspace = pspace;
666 inf->aspace = aspace;
667
668 do_cleanups (old_chain);
669 }
670 else if (exec)
671 {
672 /* We're staying attached to the parent, so really give the
673 child a new address space. */
674 inf->pspace = add_program_space (maybe_new_address_space ());
675 inf->aspace = inf->pspace->aspace;
676 inf->removable = 1;
677 set_current_program_space (inf->pspace);
678
679 resume_parent = inf->vfork_parent->pid;
680
681 /* Break the bonds. */
682 inf->vfork_parent->vfork_child = NULL;
683 }
684 else
685 {
686 struct cleanup *old_chain;
687 struct program_space *pspace;
688
689 /* If this is a vfork child exiting, then the pspace and
690 aspaces were shared with the parent. Since we're
691 reporting the process exit, we'll be mourning all that is
692 found in the address space, and switching to null_ptid,
693 preparing to start a new inferior. But, since we don't
694 want to clobber the parent's address/program spaces, we
695 go ahead and create a new one for this exiting
696 inferior. */
697
698 /* Switch to null_ptid, so that clone_program_space doesn't want
699 to read the selected frame of a dead process. */
700 old_chain = save_inferior_ptid ();
701 inferior_ptid = null_ptid;
702
703 /* This inferior is dead, so avoid giving the breakpoints
704 module the option to write through to it (cloning a
705 program space resets breakpoints). */
706 inf->aspace = NULL;
707 inf->pspace = NULL;
708 pspace = add_program_space (maybe_new_address_space ());
709 set_current_program_space (pspace);
710 inf->removable = 1;
711 clone_program_space (pspace, inf->vfork_parent->pspace);
712 inf->pspace = pspace;
713 inf->aspace = pspace->aspace;
714
715 /* Put back inferior_ptid. We'll continue mourning this
716 inferior. */
717 do_cleanups (old_chain);
718
719 resume_parent = inf->vfork_parent->pid;
720 /* Break the bonds. */
721 inf->vfork_parent->vfork_child = NULL;
722 }
723
724 inf->vfork_parent = NULL;
725
726 gdb_assert (current_program_space == inf->pspace);
727
728 if (non_stop && resume_parent != -1)
729 {
730 /* If the user wanted the parent to be running, let it go
731 free now. */
732 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
733
734 if (debug_infrun)
735 fprintf_unfiltered (gdb_stdlog,
736 "infrun: resuming vfork parent process %d\n",
737 resume_parent);
738
739 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
740
741 do_cleanups (old_chain);
742 }
743 }
744 }
745
746 /* Enum strings for "set|show follow-exec-mode". */
747
748 static const char follow_exec_mode_new[] = "new";
749 static const char follow_exec_mode_same[] = "same";
750 static const char *follow_exec_mode_names[] =
751 {
752 follow_exec_mode_new,
753 follow_exec_mode_same,
754 NULL,
755 };
756
757 static const char *follow_exec_mode_string = follow_exec_mode_same;
758 static void
759 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
760 struct cmd_list_element *c, const char *value)
761 {
762 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
763 }
764
765 /* EXECD_PATHNAME is assumed to be non-NULL. */
766
767 static void
768 follow_exec (ptid_t pid, char *execd_pathname)
769 {
770 struct thread_info *th = inferior_thread ();
771 struct inferior *inf = current_inferior ();
772
773 /* This is an exec event that we actually wish to pay attention to.
774 Refresh our symbol table to the newly exec'd program, remove any
775 momentary bp's, etc.
776
777 If there are breakpoints, they aren't really inserted now,
778 since the exec() transformed our inferior into a fresh set
779 of instructions.
780
781 We want to preserve symbolic breakpoints on the list, since
782 we have hopes that they can be reset after the new a.out's
783 symbol table is read.
784
785 However, any "raw" breakpoints must be removed from the list
786 (e.g., the solib bp's), since their address is probably invalid
787 now.
788
789 And, we DON'T want to call delete_breakpoints() here, since
790 that may write the bp's "shadow contents" (the instruction
791 value that was overwritten with a TRAP instruction). Since
792 we now have a new a.out, those shadow contents aren't valid. */
793
794 mark_breakpoints_out ();
795
796 update_breakpoints_after_exec ();
797
798 /* If there was one, it's gone now. We cannot truly step-to-next
799 statement through an exec(). */
800 th->control.step_resume_breakpoint = NULL;
801 th->control.exception_resume_breakpoint = NULL;
802 th->control.step_range_start = 0;
803 th->control.step_range_end = 0;
804
805 /* The target reports the exec event to the main thread, even if
806 some other thread does the exec, and even if the main thread was
807 already stopped --- if debugging in non-stop mode, it's possible
808 the user had the main thread held stopped in the previous image
809 --- release it now. This is the same behavior as step-over-exec
810 with scheduler-locking on in all-stop mode. */
811 th->stop_requested = 0;
812
813 /* What is this a.out's name? */
814 printf_unfiltered (_("%s is executing new program: %s\n"),
815 target_pid_to_str (inferior_ptid),
816 execd_pathname);
817
818 /* We've followed the inferior through an exec. Therefore, the
819 inferior has essentially been killed & reborn. */
820
821 gdb_flush (gdb_stdout);
822
823 breakpoint_init_inferior (inf_execd);
824
825 if (gdb_sysroot && *gdb_sysroot)
826 {
827 char *name = alloca (strlen (gdb_sysroot)
828 + strlen (execd_pathname)
829 + 1);
830
831 strcpy (name, gdb_sysroot);
832 strcat (name, execd_pathname);
833 execd_pathname = name;
834 }
835
836 /* Reset the shared library package. This ensures that we get a
837 shlib event when the child reaches "_start", at which point the
838 dld will have had a chance to initialize the child. */
839 /* Also, loading a symbol file below may trigger symbol lookups, and
840 we don't want those to be satisfied by the libraries of the
841 previous incarnation of this process. */
842 no_shared_libraries (NULL, 0);
843
844 if (follow_exec_mode_string == follow_exec_mode_new)
845 {
846 struct program_space *pspace;
847
848 /* The user wants to keep the old inferior and program spaces
849 around. Create a new fresh one, and switch to it. */
850
851 inf = add_inferior (current_inferior ()->pid);
852 pspace = add_program_space (maybe_new_address_space ());
853 inf->pspace = pspace;
854 inf->aspace = pspace->aspace;
855
856 exit_inferior_num_silent (current_inferior ()->num);
857
858 set_current_inferior (inf);
859 set_current_program_space (pspace);
860 }
861
862 gdb_assert (current_program_space == inf->pspace);
863
864 /* That a.out is now the one to use. */
865 exec_file_attach (execd_pathname, 0);
866
867 /* SYMFILE_DEFER_BP_RESET is used here because the proper displacement for a
868 PIE (Position Independent Executable) main symbol file will only get
869 applied by solib_create_inferior_hook below. breakpoint_re_set would fail
870 to insert the breakpoints with the zero displacement. */
871
872 symbol_file_add (execd_pathname, SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET,
873 NULL, 0);
874
875 set_initial_language ();
876
877 #ifdef SOLIB_CREATE_INFERIOR_HOOK
878 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
879 #else
880 solib_create_inferior_hook (0);
881 #endif
882
883 jit_inferior_created_hook ();
884
885 breakpoint_re_set ();
886
887 /* Reinsert all breakpoints. (Those which were symbolic have
888 been reset to the proper address in the new a.out, thanks
889 to symbol_file_command...). */
890 insert_breakpoints ();
891
892 /* The next resume of this inferior should bring it to the shlib
893 startup breakpoints. (If the user had also set bp's on
894 "main" from the old (parent) process, then they'll auto-
895 matically get reset there in the new process.). */
896 }
897
898 /* Non-zero if we are just simulating a single-step. This is needed
899 because we cannot remove the breakpoints in the inferior process
900 until after the `wait' in `wait_for_inferior'. */
901 static int singlestep_breakpoints_inserted_p = 0;
902
903 /* The thread we inserted single-step breakpoints for. */
904 static ptid_t singlestep_ptid;
905
906 /* PC when we started this single-step. */
907 static CORE_ADDR singlestep_pc;
908
909 /* If another thread hit the singlestep breakpoint, we save the original
910 thread here so that we can resume single-stepping it later. */
911 static ptid_t saved_singlestep_ptid;
912 static int stepping_past_singlestep_breakpoint;
913
914 /* If not equal to null_ptid, this means that after stepping over a breakpoint
915 is finished, we need to switch to deferred_step_ptid, and step it.
916
917 The use case is when one thread has hit a breakpoint, and then the user
918 has switched to another thread and issued 'step'. We need to step over
919 the breakpoint in the thread which hit the breakpoint, but then continue
920 stepping the thread the user has selected. */
921 static ptid_t deferred_step_ptid;
922 \f
923 /* Displaced stepping. */
924
925 /* In non-stop debugging mode, we must take special care to manage
926 breakpoints properly; in particular, the traditional strategy for
927 stepping a thread past a breakpoint it has hit is unsuitable.
928 'Displaced stepping' is a tactic for stepping one thread past a
929 breakpoint it has hit while ensuring that other threads running
930 concurrently will hit the breakpoint as they should.
931
932 The traditional way to step a thread T off a breakpoint in a
933 multi-threaded program in all-stop mode is as follows:
934
935 a0) Initially, all threads are stopped, and breakpoints are not
936 inserted.
937 a1) We single-step T, leaving breakpoints uninserted.
938 a2) We insert breakpoints, and resume all threads.
939
940 In non-stop debugging, however, this strategy is unsuitable: we
941 don't want to have to stop all threads in the system in order to
942 continue or step T past a breakpoint. Instead, we use displaced
943 stepping:
944
945 n0) Initially, T is stopped, other threads are running, and
946 breakpoints are inserted.
947 n1) We copy the instruction "under" the breakpoint to a separate
948 location, outside the main code stream, making any adjustments
949 to the instruction, register, and memory state as directed by
950 T's architecture.
951 n2) We single-step T over the instruction at its new location.
952 n3) We adjust the resulting register and memory state as directed
953 by T's architecture. This includes resetting T's PC to point
954 back into the main instruction stream.
955 n4) We resume T.
956
957 This approach depends on the following gdbarch methods:
958
959 - gdbarch_max_insn_length and gdbarch_displaced_step_location
960 indicate where to copy the instruction, and how much space must
961 be reserved there. We use these in step n1.
962
963 - gdbarch_displaced_step_copy_insn copies an instruction to a new
964 address, and makes any necessary adjustments to the instruction,
965 register contents, and memory. We use this in step n1.
966
967 - gdbarch_displaced_step_fixup adjusts registers and memory after
968 we have successfully single-stepped the instruction, to yield the
969 same effect the instruction would have had if we had executed it
970 at its original address. We use this in step n3.
971
972 - gdbarch_displaced_step_free_closure provides cleanup.
973
974 The gdbarch_displaced_step_copy_insn and
975 gdbarch_displaced_step_fixup functions must be written so that
976 copying an instruction with gdbarch_displaced_step_copy_insn,
977 single-stepping across the copied instruction, and then applying
978 gdbarch_displaced_step_fixup should have the same effects on the
979 thread's memory and registers as stepping the instruction in place
980 would have. Exactly which responsibilities fall to the copy and
981 which fall to the fixup is up to the author of those functions.
982
983 See the comments in gdbarch.sh for details.
984
985 Note that displaced stepping and software single-step cannot
986 currently be used in combination, although with some care I think
987 they could be made to. Software single-step works by placing
988 breakpoints on all possible subsequent instructions; if the
989 displaced instruction is a PC-relative jump, those breakpoints
990 could fall in very strange places --- on pages that aren't
991 executable, or at addresses that are not proper instruction
992 boundaries. (We do generally let other threads run while we wait
993 to hit the software single-step breakpoint, and they might
994 encounter such a corrupted instruction.) One way to work around
995 this would be to have gdbarch_displaced_step_copy_insn fully
996 simulate the effect of PC-relative instructions (and return NULL)
997 on architectures that use software single-stepping.
998
999 In non-stop mode, we can have independent and simultaneous step
1000 requests, so more than one thread may need to simultaneously step
1001 over a breakpoint. The current implementation assumes there is
1002 only one scratch space per process. In this case, we have to
1003 serialize access to the scratch space. If thread A wants to step
1004 over a breakpoint, but we are currently waiting for some other
1005 thread to complete a displaced step, we leave thread A stopped and
1006 place it in the displaced_step_request_queue. Whenever a displaced
1007 step finishes, we pick the next thread in the queue and start a new
1008 displaced step operation on it. See displaced_step_prepare and
1009 displaced_step_fixup for details. */
1010
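/* A condensed, illustrative view of one displaced-step cycle in terms of
   the gdbarch hooks named above (queueing, saving/restoring the scratch
   area and error handling omitted; displaced_step_prepare and
   displaced_step_fixup below are the real implementation):

   struct regcache *regcache = get_thread_regcache (ptid);
   struct gdbarch *gdbarch = get_regcache_arch (regcache);
   CORE_ADDR original = regcache_read_pc (regcache);
   CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);

   n1: struct displaced_step_closure *closure
         = gdbarch_displaced_step_copy_insn (gdbarch, original, copy, regcache);
       regcache_write_pc (regcache, copy);
   n2: target_resume (ptid, 1, TARGET_SIGNAL_0);
       ... wait for the single-step to report back ...
   n3: gdbarch_displaced_step_fixup (gdbarch, closure, original, copy, regcache);
       gdbarch_displaced_step_free_closure (gdbarch, closure);
   n4: target_resume (ptid, 0, TARGET_SIGNAL_0);
*/
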
1011 struct displaced_step_request
1012 {
1013 ptid_t ptid;
1014 struct displaced_step_request *next;
1015 };
1016
1017 /* Per-inferior displaced stepping state. */
1018 struct displaced_step_inferior_state
1019 {
1020 /* Pointer to next in linked list. */
1021 struct displaced_step_inferior_state *next;
1022
1023 /* The process this displaced step state refers to. */
1024 int pid;
1025
1026 /* A queue of pending displaced stepping requests. One entry per
1027 thread that needs to do a displaced step. */
1028 struct displaced_step_request *step_request_queue;
1029
1030 /* If this is not null_ptid, this is the thread carrying out a
1031 displaced single-step in process PID. This thread's state will
1032 require fixing up once it has completed its step. */
1033 ptid_t step_ptid;
1034
1035 /* The architecture the thread had when we stepped it. */
1036 struct gdbarch *step_gdbarch;
1037
1038 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1039 for post-step cleanup. */
1040 struct displaced_step_closure *step_closure;
1041
1042 /* The address of the original instruction, and the copy we
1043 made. */
1044 CORE_ADDR step_original, step_copy;
1045
1046 /* Saved contents of copy area. */
1047 gdb_byte *step_saved_copy;
1048 };
1049
1050 /* The list of states of processes involved in displaced stepping
1051 presently. */
1052 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1053
1054 /* Get the displaced stepping state of process PID. */
1055
1056 static struct displaced_step_inferior_state *
1057 get_displaced_stepping_state (int pid)
1058 {
1059 struct displaced_step_inferior_state *state;
1060
1061 for (state = displaced_step_inferior_states;
1062 state != NULL;
1063 state = state->next)
1064 if (state->pid == pid)
1065 return state;
1066
1067 return NULL;
1068 }
1069
1070 /* Add a new displaced stepping state for process PID to the displaced
1071 stepping state list, or return a pointer to an already existing
1072 entry if one already exists. Never returns NULL. */
1073
1074 static struct displaced_step_inferior_state *
1075 add_displaced_stepping_state (int pid)
1076 {
1077 struct displaced_step_inferior_state *state;
1078
1079 for (state = displaced_step_inferior_states;
1080 state != NULL;
1081 state = state->next)
1082 if (state->pid == pid)
1083 return state;
1084
1085 state = xcalloc (1, sizeof (*state));
1086 state->pid = pid;
1087 state->next = displaced_step_inferior_states;
1088 displaced_step_inferior_states = state;
1089
1090 return state;
1091 }
1092
1093 /* If the inferior is in displaced stepping, and ADDR equals the starting
1094 address of the copy area, return the corresponding displaced_step_closure.
1095 Otherwise, return NULL. */
1096
1097 struct displaced_step_closure*
1098 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1099 {
1100 struct displaced_step_inferior_state *displaced
1101 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1102
1103 /* Is ADDR the start of the copy area of a pending displaced step? */
1104 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1105 && (displaced->step_copy == addr))
1106 return displaced->step_closure;
1107
1108 return NULL;
1109 }
1110
1111 /* Remove the displaced stepping state of process PID. */
1112
1113 static void
1114 remove_displaced_stepping_state (int pid)
1115 {
1116 struct displaced_step_inferior_state *it, **prev_next_p;
1117
1118 gdb_assert (pid != 0);
1119
1120 it = displaced_step_inferior_states;
1121 prev_next_p = &displaced_step_inferior_states;
1122 while (it)
1123 {
1124 if (it->pid == pid)
1125 {
1126 *prev_next_p = it->next;
1127 xfree (it);
1128 return;
1129 }
1130
1131 prev_next_p = &it->next;
1132 it = *prev_next_p;
1133 }
1134 }
1135
1136 static void
1137 infrun_inferior_exit (struct inferior *inf)
1138 {
1139 remove_displaced_stepping_state (inf->pid);
1140 }
1141
1142 /* Enum strings for "set|show displaced-stepping". */
1143
1144 static const char can_use_displaced_stepping_auto[] = "auto";
1145 static const char can_use_displaced_stepping_on[] = "on";
1146 static const char can_use_displaced_stepping_off[] = "off";
1147 static const char *can_use_displaced_stepping_enum[] =
1148 {
1149 can_use_displaced_stepping_auto,
1150 can_use_displaced_stepping_on,
1151 can_use_displaced_stepping_off,
1152 NULL,
1153 };
1154
1155 /* If ON, and the architecture supports it, GDB will use displaced
1156 stepping to step over breakpoints. If OFF, or if the architecture
1157 doesn't support it, GDB will instead use the traditional
1158 hold-and-step approach. If AUTO (which is the default), GDB will
1159 decide which technique to use to step over breakpoints depending on
1160 which of all-stop or non-stop mode is active --- displaced stepping
1161 in non-stop mode; hold-and-step in all-stop mode. */
1162
1163 static const char *can_use_displaced_stepping =
1164 can_use_displaced_stepping_auto;
1165
1166 static void
1167 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1168 struct cmd_list_element *c,
1169 const char *value)
1170 {
1171 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1172 fprintf_filtered (file,
1173 _("Debugger's willingness to use displaced stepping "
1174 "to step over breakpoints is %s (currently %s).\n"),
1175 value, non_stop ? "on" : "off");
1176 else
1177 fprintf_filtered (file,
1178 _("Debugger's willingness to use displaced stepping "
1179 "to step over breakpoints is %s.\n"), value);
1180 }
1181
1182 /* Return non-zero if displaced stepping can/should be used to step
1183 over breakpoints. */
1184
1185 static int
1186 use_displaced_stepping (struct gdbarch *gdbarch)
1187 {
1188 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1189 && non_stop)
1190 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1191 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1192 && !RECORD_IS_USED);
1193 }
1194
1195 /* Clean out any stray displaced stepping state. */
1196 static void
1197 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1198 {
1199 /* Indicate that there is no cleanup pending. */
1200 displaced->step_ptid = null_ptid;
1201
1202 if (displaced->step_closure)
1203 {
1204 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1205 displaced->step_closure);
1206 displaced->step_closure = NULL;
1207 }
1208 }
1209
1210 static void
1211 displaced_step_clear_cleanup (void *arg)
1212 {
1213 struct displaced_step_inferior_state *state = arg;
1214
1215 displaced_step_clear (state);
1216 }
1217
1218 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1219 void
1220 displaced_step_dump_bytes (struct ui_file *file,
1221 const gdb_byte *buf,
1222 size_t len)
1223 {
1224 int i;
1225
1226 for (i = 0; i < len; i++)
1227 fprintf_unfiltered (file, "%02x ", buf[i]);
1228 fputs_unfiltered ("\n", file);
1229 }
1230
1231 /* Prepare to single-step, using displaced stepping.
1232
1233 Note that we cannot use displaced stepping when we have a signal to
1234 deliver. If we have a signal to deliver and an instruction to step
1235 over, then after the step, there will be no indication from the
1236 target whether the thread entered a signal handler or ignored the
1237 signal and stepped over the instruction successfully --- both cases
1238 result in a simple SIGTRAP. In the first case we mustn't do a
1239 fixup, and in the second case we must --- but we can't tell which.
1240 Comments in the code for 'random signals' in handle_inferior_event
1241 explain how we handle this case instead.
1242
1243 Returns 1 if preparing was successful -- this thread is going to be
1244 stepped now; or 0 if displaced stepping this thread got queued. */
1245 static int
1246 displaced_step_prepare (ptid_t ptid)
1247 {
1248 struct cleanup *old_cleanups, *ignore_cleanups;
1249 struct regcache *regcache = get_thread_regcache (ptid);
1250 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1251 CORE_ADDR original, copy;
1252 ULONGEST len;
1253 struct displaced_step_closure *closure;
1254 struct displaced_step_inferior_state *displaced;
1255
1256 /* We should never reach this function if the architecture does not
1257 support displaced stepping. */
1258 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1259
1260 /* We have to displaced step one thread at a time, as we only have
1261 access to a single scratch space per inferior. */
1262
1263 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1264
1265 if (!ptid_equal (displaced->step_ptid, null_ptid))
1266 {
1267 /* Already waiting for a displaced step to finish. Defer this
1268 request and place in queue. */
1269 struct displaced_step_request *req, *new_req;
1270
1271 if (debug_displaced)
1272 fprintf_unfiltered (gdb_stdlog,
1273 "displaced: defering step of %s\n",
1274 target_pid_to_str (ptid));
1275
1276 new_req = xmalloc (sizeof (*new_req));
1277 new_req->ptid = ptid;
1278 new_req->next = NULL;
1279
1280 if (displaced->step_request_queue)
1281 {
1282 for (req = displaced->step_request_queue;
1283 req && req->next;
1284 req = req->next)
1285 ;
1286 req->next = new_req;
1287 }
1288 else
1289 displaced->step_request_queue = new_req;
1290
1291 return 0;
1292 }
1293 else
1294 {
1295 if (debug_displaced)
1296 fprintf_unfiltered (gdb_stdlog,
1297 "displaced: stepping %s now\n",
1298 target_pid_to_str (ptid));
1299 }
1300
1301 displaced_step_clear (displaced);
1302
1303 old_cleanups = save_inferior_ptid ();
1304 inferior_ptid = ptid;
1305
1306 original = regcache_read_pc (regcache);
1307
1308 copy = gdbarch_displaced_step_location (gdbarch);
1309 len = gdbarch_max_insn_length (gdbarch);
1310
1311 /* Save the original contents of the copy area. */
1312 displaced->step_saved_copy = xmalloc (len);
1313 ignore_cleanups = make_cleanup (free_current_contents,
1314 &displaced->step_saved_copy);
1315 read_memory (copy, displaced->step_saved_copy, len);
1316 if (debug_displaced)
1317 {
1318 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1319 paddress (gdbarch, copy));
1320 displaced_step_dump_bytes (gdb_stdlog,
1321 displaced->step_saved_copy,
1322 len);
1323 };
1324
1325 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1326 original, copy, regcache);
1327
1328 /* We don't support the fully-simulated case at present. */
1329 gdb_assert (closure);
1330
1331 /* Save the information we need to fix things up if the step
1332 succeeds. */
1333 displaced->step_ptid = ptid;
1334 displaced->step_gdbarch = gdbarch;
1335 displaced->step_closure = closure;
1336 displaced->step_original = original;
1337 displaced->step_copy = copy;
1338
1339 make_cleanup (displaced_step_clear_cleanup, displaced);
1340
1341 /* Resume execution at the copy. */
1342 regcache_write_pc (regcache, copy);
1343
1344 discard_cleanups (ignore_cleanups);
1345
1346 do_cleanups (old_cleanups);
1347
1348 if (debug_displaced)
1349 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1350 paddress (gdbarch, copy));
1351
1352 return 1;
1353 }
1354
1355 static void
1356 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1357 const gdb_byte *myaddr, int len)
1358 {
1359 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1360
1361 inferior_ptid = ptid;
1362 write_memory (memaddr, myaddr, len);
1363 do_cleanups (ptid_cleanup);
1364 }
1365
1366 static void
1367 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1368 {
1369 struct cleanup *old_cleanups;
1370 struct displaced_step_inferior_state *displaced
1371 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1372
1373 /* Was any thread of this process doing a displaced step? */
1374 if (displaced == NULL)
1375 return;
1376
1377 /* Was this event for the thread we displaced? */
1378 if (ptid_equal (displaced->step_ptid, null_ptid)
1379 || ! ptid_equal (displaced->step_ptid, event_ptid))
1380 return;
1381
1382 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1383
1384 /* Restore the contents of the copy area. */
1385 {
1386 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1387
1388 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1389 displaced->step_saved_copy, len);
1390 if (debug_displaced)
1391 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1392 paddress (displaced->step_gdbarch,
1393 displaced->step_copy));
1394 }
1395
1396 /* Did the instruction complete successfully? */
1397 if (signal == TARGET_SIGNAL_TRAP)
1398 {
1399 /* Fix up the resulting state. */
1400 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1401 displaced->step_closure,
1402 displaced->step_original,
1403 displaced->step_copy,
1404 get_thread_regcache (displaced->step_ptid));
1405 }
1406 else
1407 {
1408 /* Since the instruction didn't complete, all we can do is
1409 relocate the PC. */
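/* For example (illustrative numbers only): if the original instruction
   was at 0x400500 and the scratch copy at 0x601000, and the thread
   stopped with PC == 0x601004, the PC is rewritten to
   0x400500 + (0x601004 - 0x601000) == 0x400504, i.e. the same offset
   into the original instruction. */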
1410 struct regcache *regcache = get_thread_regcache (event_ptid);
1411 CORE_ADDR pc = regcache_read_pc (regcache);
1412
1413 pc = displaced->step_original + (pc - displaced->step_copy);
1414 regcache_write_pc (regcache, pc);
1415 }
1416
1417 do_cleanups (old_cleanups);
1418
1419 displaced->step_ptid = null_ptid;
1420
1421 /* Are there any pending displaced stepping requests? If so, run
1422 one now. Leave the state object around, since we're likely to
1423 need it again soon. */
1424 while (displaced->step_request_queue)
1425 {
1426 struct displaced_step_request *head;
1427 ptid_t ptid;
1428 struct regcache *regcache;
1429 struct gdbarch *gdbarch;
1430 CORE_ADDR actual_pc;
1431 struct address_space *aspace;
1432
1433 head = displaced->step_request_queue;
1434 ptid = head->ptid;
1435 displaced->step_request_queue = head->next;
1436 xfree (head);
1437
1438 context_switch (ptid);
1439
1440 regcache = get_thread_regcache (ptid);
1441 actual_pc = regcache_read_pc (regcache);
1442 aspace = get_regcache_aspace (regcache);
1443
1444 if (breakpoint_here_p (aspace, actual_pc))
1445 {
1446 if (debug_displaced)
1447 fprintf_unfiltered (gdb_stdlog,
1448 "displaced: stepping queued %s now\n",
1449 target_pid_to_str (ptid));
1450
1451 displaced_step_prepare (ptid);
1452
1453 gdbarch = get_regcache_arch (regcache);
1454
1455 if (debug_displaced)
1456 {
1457 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1458 gdb_byte buf[4];
1459
1460 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1461 paddress (gdbarch, actual_pc));
1462 read_memory (actual_pc, buf, sizeof (buf));
1463 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1464 }
1465
1466 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1467 displaced->step_closure))
1468 target_resume (ptid, 1, TARGET_SIGNAL_0);
1469 else
1470 target_resume (ptid, 0, TARGET_SIGNAL_0);
1471
1472 /* Done, we're stepping a thread. */
1473 break;
1474 }
1475 else
1476 {
1477 int step;
1478 struct thread_info *tp = inferior_thread ();
1479
1480 /* The breakpoint we were sitting under has since been
1481 removed. */
1482 tp->control.trap_expected = 0;
1483
1484 /* Go back to what we were trying to do. */
1485 step = currently_stepping (tp);
1486
1487 if (debug_displaced)
1488 fprintf_unfiltered (gdb_stdlog,
1489 "breakpoint is gone %s: step(%d)\n",
1490 target_pid_to_str (tp->ptid), step);
1491
1492 target_resume (ptid, step, TARGET_SIGNAL_0);
1493 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1494
1495 /* This request was discarded. See if there's any other
1496 thread waiting for its turn. */
1497 }
1498 }
1499 }
1500
1501 /* Update global variables holding ptids to hold NEW_PTID if they were
1502 holding OLD_PTID. */
1503 static void
1504 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1505 {
1506 struct displaced_step_request *it;
1507 struct displaced_step_inferior_state *displaced;
1508
1509 if (ptid_equal (inferior_ptid, old_ptid))
1510 inferior_ptid = new_ptid;
1511
1512 if (ptid_equal (singlestep_ptid, old_ptid))
1513 singlestep_ptid = new_ptid;
1514
1515 if (ptid_equal (deferred_step_ptid, old_ptid))
1516 deferred_step_ptid = new_ptid;
1517
1518 for (displaced = displaced_step_inferior_states;
1519 displaced;
1520 displaced = displaced->next)
1521 {
1522 if (ptid_equal (displaced->step_ptid, old_ptid))
1523 displaced->step_ptid = new_ptid;
1524
1525 for (it = displaced->step_request_queue; it; it = it->next)
1526 if (ptid_equal (it->ptid, old_ptid))
1527 it->ptid = new_ptid;
1528 }
1529 }
1530
1531 \f
1532 /* Resuming. */
1533
1534 /* Things to clean up if we QUIT out of resume (). */
1535 static void
1536 resume_cleanups (void *ignore)
1537 {
1538 normal_stop ();
1539 }
1540
1541 static const char schedlock_off[] = "off";
1542 static const char schedlock_on[] = "on";
1543 static const char schedlock_step[] = "step";
1544 static const char *scheduler_enums[] = {
1545 schedlock_off,
1546 schedlock_on,
1547 schedlock_step,
1548 NULL
1549 };
1550 static const char *scheduler_mode = schedlock_off;
1551 static void
1552 show_scheduler_mode (struct ui_file *file, int from_tty,
1553 struct cmd_list_element *c, const char *value)
1554 {
1555 fprintf_filtered (file,
1556 _("Mode for locking scheduler "
1557 "during execution is \"%s\".\n"),
1558 value);
1559 }
1560
1561 static void
1562 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1563 {
1564 if (!target_can_lock_scheduler)
1565 {
1566 scheduler_mode = schedlock_off;
1567 error (_("Target '%s' cannot support this command."), target_shortname);
1568 }
1569 }
1570
1571 /* True if execution commands resume all threads of all processes by
1572 default; otherwise, resume only threads of the current inferior
1573 process. */
1574 int sched_multi = 0;
1575
1576 /* Try to setup for software single stepping over the specified location.
1577 Return 1 if target_resume() should use hardware single step.
1578
1579 GDBARCH the current gdbarch.
1580 PC the location to step over. */
1581
1582 static int
1583 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1584 {
1585 int hw_step = 1;
1586
1587 if (execution_direction == EXEC_FORWARD
1588 && gdbarch_software_single_step_p (gdbarch)
1589 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1590 {
1591 hw_step = 0;
1592 /* Do not pull these breakpoints until after a `wait' in
1593 `wait_for_inferior'. */
1594 singlestep_breakpoints_inserted_p = 1;
1595 singlestep_ptid = inferior_ptid;
1596 singlestep_pc = pc;
1597 }
1598 return hw_step;
1599 }
1600
1601 /* Return a ptid representing the set of threads that we will proceed,
1602 from the perspective of the user/frontend. We may actually resume
1603 fewer threads at first, e.g., if a thread is stopped at a
1604 breakpoint that needs stepping-off, but that should not be visible
1605 to the user/frontend, and neither should the frontend/user be
1606 allowed to proceed any of the threads that happen to be stopped for
1607 internal run control handling, if a previous command wanted them
1608 resumed. */
1609
1610 ptid_t
1611 user_visible_resume_ptid (int step)
1612 {
1613 /* By default, resume all threads of all processes. */
1614 ptid_t resume_ptid = RESUME_ALL;
1615
1616 /* Maybe resume only all threads of the current process. */
1617 if (!sched_multi && target_supports_multi_process ())
1618 {
1619 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1620 }
1621
1622 /* Maybe resume a single thread after all. */
1623 if (non_stop)
1624 {
1625 /* With non-stop mode on, threads are always handled
1626 individually. */
1627 resume_ptid = inferior_ptid;
1628 }
1629 else if ((scheduler_mode == schedlock_on)
1630 || (scheduler_mode == schedlock_step
1631 && (step || singlestep_breakpoints_inserted_p)))
1632 {
1633 /* User-settable 'scheduler' mode requires solo thread resume. */
1634 resume_ptid = inferior_ptid;
1635 }
1636
1637 return resume_ptid;
1638 }
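
/* For instance (an illustrative caller, not code from this file), a
   command that needs to first step the current thread over a breakpoint
   might report the whole user-visible set as running while actually
   resuming only one thread:

   ptid_t resume_ptid = user_visible_resume_ptid (1);

   set_running (resume_ptid, 1);
   target_resume (inferior_ptid, 1, TARGET_SIGNAL_0);
*/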
1639
1640 /* Resume the inferior, but allow a QUIT. This is useful if the user
1641 wants to interrupt some lengthy single-stepping operation
1642 (for child processes, the SIGINT goes to the inferior, and so
1643 we get a SIGINT random_signal, but for remote debugging and perhaps
1644 other targets, that's not true).
1645
1646 STEP nonzero if we should step (zero to continue instead).
1647 SIG is the signal to give the inferior (zero for none). */
1648 void
1649 resume (int step, enum target_signal sig)
1650 {
1651 int should_resume = 1;
1652 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1653 struct regcache *regcache = get_current_regcache ();
1654 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1655 struct thread_info *tp = inferior_thread ();
1656 CORE_ADDR pc = regcache_read_pc (regcache);
1657 struct address_space *aspace = get_regcache_aspace (regcache);
1658
1659 QUIT;
1660
1661 if (current_inferior ()->waiting_for_vfork_done)
1662 {
1663 /* Don't try to single-step a vfork parent that is waiting for
1664 the child to get out of the shared memory region (by exec'ing
1665 or exiting). This is particularly important on software
1666 single-step archs, as the child process would trip on the
1667 software single step breakpoint inserted for the parent
1668 process. Since the parent will not actually execute any
1669 instruction until the child is out of the shared region (such
1670 are vfork's semantics), it is safe to simply continue it.
1671 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1672 the parent, and tell it to `keep_going', which automatically
1673 re-sets it to stepping. */
1674 if (debug_infrun)
1675 fprintf_unfiltered (gdb_stdlog,
1676 "infrun: resume : clear step\n");
1677 step = 0;
1678 }
1679
1680 if (debug_infrun)
1681 fprintf_unfiltered (gdb_stdlog,
1682 "infrun: resume (step=%d, signal=%d), "
1683 "trap_expected=%d, current thread [%s] at %s\n",
1684 step, sig, tp->control.trap_expected,
1685 target_pid_to_str (inferior_ptid),
1686 paddress (gdbarch, pc));
1687
1688 /* Normally, by the time we reach `resume', the breakpoints are either
1689 removed or inserted, as appropriate. The exception is if we're sitting
1690 at a permanent breakpoint; we need to step over it, but permanent
1691 breakpoints can't be removed. So we have to test for it here. */
1692 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1693 {
1694 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1695 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1696 else
1697 error (_("\
1698 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1699 how to step past a permanent breakpoint on this architecture. Try using\n\
1700 a command like `return' or `jump' to continue execution."));
1701 }
1702
1703 /* If enabled, step over breakpoints by executing a copy of the
1704 instruction at a different address.
1705
1706 We can't use displaced stepping when we have a signal to deliver;
1707 the comments for displaced_step_prepare explain why. The
1708 comments in the handle_inferior event for dealing with 'random
1709 signals' explain what we do instead.
1710
1711 We can't use displaced stepping while we are waiting for a vfork_done
1712 event; displaced stepping would break the vfork child in the same way
1713 a software single-step breakpoint does. */
1714 if (use_displaced_stepping (gdbarch)
1715 && (tp->control.trap_expected
1716 || (step && gdbarch_software_single_step_p (gdbarch)))
1717 && sig == TARGET_SIGNAL_0
1718 && !current_inferior ()->waiting_for_vfork_done)
1719 {
1720 struct displaced_step_inferior_state *displaced;
1721
1722 if (!displaced_step_prepare (inferior_ptid))
1723 {
1724 /* Got placed in displaced stepping queue. Will be resumed
1725 later when all the currently queued displaced stepping
1726 requests finish. The thread is not executing at this point,
1727 and the call to set_executing will be made later. But we
1728 need to call set_running here, since from frontend point of view,
1729 the thread is running. */
1730 set_running (inferior_ptid, 1);
1731 discard_cleanups (old_cleanups);
1732 return;
1733 }
1734
1735 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1736 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1737 displaced->step_closure);
1738 }
1739
1740 /* Do we need to do it the hard way, w/temp breakpoints? */
1741 else if (step)
1742 step = maybe_software_singlestep (gdbarch, pc);
1743
1744 /* Currently, our software single-step implementation leads to different
1745 results than hardware single-stepping in one situation: when stepping
1746 into delivering a signal which has an associated signal handler,
1747 hardware single-step will stop at the first instruction of the handler,
1748 while software single-step will simply skip execution of the handler.
1749
1750 For now, this difference in behavior is accepted since there is no
1751 easy way to actually implement single-stepping into a signal handler
1752 without kernel support.
1753
1754 However, there is one scenario where this difference leads to follow-on
1755 problems: if we're stepping off a breakpoint by removing all breakpoints
1756 and then single-stepping. In this case, the software single-step
1757 behavior means that even if there is a *breakpoint* in the signal
1758 handler, GDB still would not stop.
1759
1760 Fortunately, we can at least fix this particular issue. We detect
1761 here the case where we are about to deliver a signal while software
1762 single-stepping with breakpoints removed. In this situation, we
1763 revert the decisions to remove all breakpoints and insert single-
1764 step breakpoints, and instead we install a step-resume breakpoint
1765 at the current address, deliver the signal without stepping, and
1766 once we arrive back at the step-resume breakpoint, actually step
1767 over the breakpoint we originally wanted to step over. */
1768 if (singlestep_breakpoints_inserted_p
1769 && tp->control.trap_expected && sig != TARGET_SIGNAL_0)
1770 {
1771 /* If we have nested signals or a pending signal is delivered
1772 immediately after a handler returns, we might already have
1773 a step-resume breakpoint set on the earlier handler. We cannot
1774 set another step-resume breakpoint; just continue on until the
1775 original breakpoint is hit. */
1776 if (tp->control.step_resume_breakpoint == NULL)
1777 {
1778 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1779 tp->step_after_step_resume_breakpoint = 1;
1780 }
1781
1782 remove_single_step_breakpoints ();
1783 singlestep_breakpoints_inserted_p = 0;
1784
1785 insert_breakpoints ();
1786 tp->control.trap_expected = 0;
1787 }
1788
1789 if (should_resume)
1790 {
1791 ptid_t resume_ptid;
1792
1793 /* If STEP is set, it's a request to use hardware stepping
1794 facilities. But in that case, we should never
1795 use singlestep breakpoint. */
1796 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1797
1798 /* Decide the set of threads to ask the target to resume. Start
1799 by assuming everything will be resumed, then narrow the set
1800 by applying increasingly restrictive conditions. */
1801 resume_ptid = user_visible_resume_ptid (step);
1802
1803 /* Maybe resume a single thread after all. */
1804 if (singlestep_breakpoints_inserted_p
1805 && stepping_past_singlestep_breakpoint)
1806 {
1807 /* The situation here is as follows. In thread T1 we wanted to
1808 single-step. Lacking hardware single-stepping, we set a
1809 breakpoint at the PC of the next instruction -- call it
1810 P. After resuming, we hit that breakpoint in thread T2.
1811 We have now removed the original breakpoint, inserted a
1812 breakpoint at P+1, and are trying to step to advance T2 past it.
1813 We need to step only T2: if T1 is allowed to run freely, it
1814 can run past P, and if other threads are allowed to run, they
1815 can hit the breakpoint at P+1, and nested hits of single-step
1816 breakpoints are not something we'd want -- that's complicated
1817 to support, and has no value. */
1818 resume_ptid = inferior_ptid;
1819 }
1820 else if ((step || singlestep_breakpoints_inserted_p)
1821 && tp->control.trap_expected)
1822 {
1823 /* We're allowing a thread to run past a breakpoint it has
1824 hit, by single-stepping the thread with the breakpoint
1825 removed. In which case, we need to single-step only this
1826 thread, and keep others stopped, as they can miss this
1827 breakpoint if allowed to run.
1828
1829 The current code actually removes all breakpoints when
1830 doing this, not just the one being stepped over, so if we
1831 let other threads run, we can actually miss any
1832 breakpoint, not just the one at PC. */
1833 resume_ptid = inferior_ptid;
1834 }
1835
1836 if (gdbarch_cannot_step_breakpoint (gdbarch))
1837 {
1838 /* Most targets can step a breakpoint instruction, thus
1839 executing it normally. But if this one cannot, just
1840 continue and we will hit it anyway. */
1841 if (step && breakpoint_inserted_here_p (aspace, pc))
1842 step = 0;
1843 }
1844
1845 if (debug_displaced
1846 && use_displaced_stepping (gdbarch)
1847 && tp->control.trap_expected)
1848 {
1849 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1850 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1851 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1852 gdb_byte buf[4];
1853
1854 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1855 paddress (resume_gdbarch, actual_pc));
1856 read_memory (actual_pc, buf, sizeof (buf));
1857 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1858 }
1859
1860 /* Install inferior's terminal modes. */
1861 target_terminal_inferior ();
1862
1863 /* Avoid confusing the next resume, if the next stop/resume
1864 happens to apply to another thread. */
1865 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1866
1867 /* Advise target which signals may be handled silently. If we have
1868 removed breakpoints because we are stepping over one (which can
1869 happen only if we are not using displaced stepping), we need to
1870 receive all signals to avoid accidentally skipping a breakpoint
1871 during execution of a signal handler. */
1872 if ((step || singlestep_breakpoints_inserted_p)
1873 && tp->control.trap_expected
1874 && !use_displaced_stepping (gdbarch))
1875 target_pass_signals (0, NULL);
1876 else
1877 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
1878
1879 target_resume (resume_ptid, step, sig);
1880 }
1881
1882 discard_cleanups (old_cleanups);
1883 }
1884 \f
1885 /* Proceeding. */
1886
1887 /* Clear out all variables saying what to do when inferior is continued.
1888 First do this, then set the ones you want, then call `proceed'. */
1889
1890 static void
1891 clear_proceed_status_thread (struct thread_info *tp)
1892 {
1893 if (debug_infrun)
1894 fprintf_unfiltered (gdb_stdlog,
1895 "infrun: clear_proceed_status_thread (%s)\n",
1896 target_pid_to_str (tp->ptid));
1897
1898 tp->control.trap_expected = 0;
1899 tp->control.step_range_start = 0;
1900 tp->control.step_range_end = 0;
1901 tp->control.step_frame_id = null_frame_id;
1902 tp->control.step_stack_frame_id = null_frame_id;
1903 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
1904 tp->stop_requested = 0;
1905
1906 tp->control.stop_step = 0;
1907
1908 tp->control.proceed_to_finish = 0;
1909
1910 /* Discard any remaining commands or status from previous stop. */
1911 bpstat_clear (&tp->control.stop_bpstat);
1912 }
1913
1914 static int
1915 clear_proceed_status_callback (struct thread_info *tp, void *data)
1916 {
1917 if (is_exited (tp->ptid))
1918 return 0;
1919
1920 clear_proceed_status_thread (tp);
1921 return 0;
1922 }
1923
1924 void
1925 clear_proceed_status (void)
1926 {
1927 if (!non_stop)
1928 {
1929 /* In all-stop mode, delete the per-thread status of all
1930 threads, even if inferior_ptid is null_ptid, there may be
1931 threads on the list. E.g., we may be launching a new
1932 process, while selecting the executable. */
1933 iterate_over_threads (clear_proceed_status_callback, NULL);
1934 }
1935
1936 if (!ptid_equal (inferior_ptid, null_ptid))
1937 {
1938 struct inferior *inferior;
1939
1940 if (non_stop)
1941 {
1942 /* If in non-stop mode, only delete the per-thread status of
1943 the current thread. */
1944 clear_proceed_status_thread (inferior_thread ());
1945 }
1946
1947 inferior = current_inferior ();
1948 inferior->control.stop_soon = NO_STOP_QUIETLY;
1949 }
1950
1951 stop_after_trap = 0;
1952
1953 observer_notify_about_to_proceed ();
1954
1955 if (stop_registers)
1956 {
1957 regcache_xfree (stop_registers);
1958 stop_registers = NULL;
1959 }
1960 }
1961
1962 /* Check the current thread against the thread that reported the most recent
1963 event. If a step-over is required return TRUE and set the current thread
1964 to the old thread. Otherwise return FALSE.
1965
1966 This should be suitable for any targets that support threads. */
1967
1968 static int
1969 prepare_to_proceed (int step)
1970 {
1971 ptid_t wait_ptid;
1972 struct target_waitstatus wait_status;
1973 int schedlock_enabled;
1974
1975 /* With non-stop mode on, threads are always handled individually. */
1976 gdb_assert (! non_stop);
1977
1978 /* Get the last target status returned by target_wait(). */
1979 get_last_target_status (&wait_ptid, &wait_status);
1980
1981 /* Make sure we were stopped at a breakpoint. */
1982 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1983 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1984 && wait_status.value.sig != TARGET_SIGNAL_ILL
1985 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1986 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1987 {
1988 return 0;
1989 }
1990
1991 schedlock_enabled = (scheduler_mode == schedlock_on
1992 || (scheduler_mode == schedlock_step
1993 && step));
1994
1995 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1996 if (schedlock_enabled)
1997 return 0;
1998
1999 /* Don't switch over if we're about to resume some other process
2000 other than WAIT_PTID's, and schedule-multiple is off. */
2001 if (!sched_multi
2002 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2003 return 0;
2004
2005 /* Maybe switch over to WAIT_PTID's thread. */
2006 if (!ptid_equal (wait_ptid, minus_one_ptid)
2007 && !ptid_equal (inferior_ptid, wait_ptid))
2008 {
2009 struct regcache *regcache = get_thread_regcache (wait_ptid);
2010
2011 if (breakpoint_here_p (get_regcache_aspace (regcache),
2012 regcache_read_pc (regcache)))
2013 {
2014 /* If stepping, remember current thread to switch back to. */
2015 if (step)
2016 deferred_step_ptid = inferior_ptid;
2017
2018 /* Switch to the WAIT_PTID thread. */
2019 switch_to_thread (wait_ptid);
2020
2021 if (debug_infrun)
2022 fprintf_unfiltered (gdb_stdlog,
2023 "infrun: prepare_to_proceed (step=%d), "
2024 "switched to [%s]\n",
2025 step, target_pid_to_str (inferior_ptid));
2026
2027 /* We return 1 to indicate that there is a breakpoint here,
2028 so we need to step over it before continuing to avoid
2029 hitting it straight away. */
2030 return 1;
2031 }
2032 }
2033
2034 return 0;
2035 }
2036
2037 /* Basic routine for continuing the program in various fashions.
2038
2039 ADDR is the address to resume at, or -1 for resume where stopped.
2040 SIGGNAL is the signal to give it, or 0 for none,
2041 or -1 for act according to how it stopped.
2042 STEP is nonzero if we should trap after one instruction;
2043 -1 means return after that and print nothing.
2044 You should probably set various step_... variables
2045 before calling here, if you are stepping.
2046
2047 You should call clear_proceed_status before calling proceed. */
2048
2049 void
2050 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
2051 {
2052 struct regcache *regcache;
2053 struct gdbarch *gdbarch;
2054 struct thread_info *tp;
2055 CORE_ADDR pc;
2056 struct address_space *aspace;
2057 int oneproc = 0;
2058
2059 /* If we're stopped at a fork/vfork, follow the branch set by the
2060 "set follow-fork-mode" command; otherwise, we'll just proceed
2061 resuming the current thread. */
2062 if (!follow_fork ())
2063 {
2064 /* The target for some reason decided not to resume. */
2065 normal_stop ();
2066 if (target_can_async_p ())
2067 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2068 return;
2069 }
2070
2071 /* We'll update this if & when we switch to a new thread. */
2072 previous_inferior_ptid = inferior_ptid;
2073
2074 regcache = get_current_regcache ();
2075 gdbarch = get_regcache_arch (regcache);
2076 aspace = get_regcache_aspace (regcache);
2077 pc = regcache_read_pc (regcache);
2078
2079 if (step > 0)
2080 step_start_function = find_pc_function (pc);
2081 if (step < 0)
2082 stop_after_trap = 1;
2083
2084 if (addr == (CORE_ADDR) -1)
2085 {
2086 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2087 && execution_direction != EXEC_REVERSE)
2088 /* There is a breakpoint at the address we will resume at,
2089 step one instruction before inserting breakpoints so that
2090 we do not stop right away (and report a second hit at this
2091 breakpoint).
2092
2093 Note, we don't do this in reverse, because we won't
2094 actually be executing the breakpoint insn anyway.
2095 We'll be (un-)executing the previous instruction. */
2096
2097 oneproc = 1;
2098 else if (gdbarch_single_step_through_delay_p (gdbarch)
2099 && gdbarch_single_step_through_delay (gdbarch,
2100 get_current_frame ()))
2101 /* We stepped onto an instruction that needs to be stepped
2102 again before re-inserting the breakpoint, do so. */
2103 oneproc = 1;
2104 }
2105 else
2106 {
2107 regcache_write_pc (regcache, addr);
2108 }
2109
2110 if (debug_infrun)
2111 fprintf_unfiltered (gdb_stdlog,
2112 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
2113 paddress (gdbarch, addr), siggnal, step);
2114
2115 if (non_stop)
2116 /* In non-stop, each thread is handled individually. The context
2117 must already be set to the right thread here. */
2118 ;
2119 else
2120 {
2121 /* In a multi-threaded task we may select another thread and
2122 then continue or step.
2123
2124 But if the old thread was stopped at a breakpoint, it will
2125 immediately cause another breakpoint stop without any
2126 execution (i.e. it will report a breakpoint hit incorrectly).
2127 So we must step over it first.
2128
2129 prepare_to_proceed checks the current thread against the
2130 thread that reported the most recent event. If a step-over
2131 is required it returns TRUE and sets the current thread to
2132 the old thread. */
2133 if (prepare_to_proceed (step))
2134 oneproc = 1;
2135 }
2136
2137 /* prepare_to_proceed may change the current thread. */
2138 tp = inferior_thread ();
2139
2140 if (oneproc)
2141 {
2142 tp->control.trap_expected = 1;
2143 /* If displaced stepping is enabled, we can step over the
2144 breakpoint without hitting it, so leave all breakpoints
2145 inserted. Otherwise we need to disable all breakpoints, step
2146 one instruction, and then re-add them when that step is
2147 finished. */
2148 if (!use_displaced_stepping (gdbarch))
2149 remove_breakpoints ();
2150 }
2151
2152 /* We can insert breakpoints if we're not trying to step over one,
2153 or if we are stepping over one but we're using displaced stepping
2154 to do so. */
2155 if (! tp->control.trap_expected || use_displaced_stepping (gdbarch))
2156 insert_breakpoints ();
2157
2158 if (!non_stop)
2159 {
2160 /* Pass the last stop signal to the thread we're resuming,
2161 irrespective of whether the current thread is the thread that
2162 got the last event or not. This was historically GDB's
2163 behaviour before keeping a stop_signal per thread. */
2164
2165 struct thread_info *last_thread;
2166 ptid_t last_ptid;
2167 struct target_waitstatus last_status;
2168
2169 get_last_target_status (&last_ptid, &last_status);
2170 if (!ptid_equal (inferior_ptid, last_ptid)
2171 && !ptid_equal (last_ptid, null_ptid)
2172 && !ptid_equal (last_ptid, minus_one_ptid))
2173 {
2174 last_thread = find_thread_ptid (last_ptid);
2175 if (last_thread)
2176 {
2177 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2178 last_thread->suspend.stop_signal = TARGET_SIGNAL_0;
2179 }
2180 }
2181 }
2182
2183 if (siggnal != TARGET_SIGNAL_DEFAULT)
2184 tp->suspend.stop_signal = siggnal;
2185 /* If this signal should not be seen by program,
2186 give it zero. Used for debugging signals. */
2187 else if (!signal_program[tp->suspend.stop_signal])
2188 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2189
2190 annotate_starting ();
2191
2192 /* Make sure that output from GDB appears before output from the
2193 inferior. */
2194 gdb_flush (gdb_stdout);
2195
2196 /* Refresh prev_pc value just prior to resuming. This used to be
2197 done in stop_stepping, however, setting prev_pc there did not handle
2198 scenarios such as inferior function calls or returning from
2199 a function via the return command. In those cases, the prev_pc
2200 value was not set properly for subsequent commands. The prev_pc value
2201 is used to initialize the starting line number in the ecs. With an
2202 invalid value, the gdb next command ends up stopping at the position
2203 represented by the next line table entry past our start position.
2204 On platforms that generate one line table entry per line, this
2205 is not a problem. However, on the ia64, the compiler generates
2206 extraneous line table entries that do not increase the line number.
2207 When we issue the gdb next command on the ia64 after an inferior call
2208 or a return command, we often end up a few instructions forward, still
2209 within the original line we started.
2210
2211 An attempt was made to refresh the prev_pc at the same time the
2212 execution_control_state is initialized (for instance, just before
2213 waiting for an inferior event). But this approach did not work
2214 because of platforms that use ptrace, where the pc register cannot
2215 be read unless the inferior is stopped. At that point, we are not
2216 guaranteed the inferior is stopped and so the regcache_read_pc() call
2217 can fail. Setting the prev_pc value here ensures the value is updated
2218 correctly when the inferior is stopped. */
2219 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2220
2221 /* Fill in with reasonable starting values. */
2222 init_thread_stepping_state (tp);
2223
2224 /* Reset to normal state. */
2225 init_infwait_state ();
2226
2227 /* Resume inferior. */
2228 resume (oneproc || step || bpstat_should_step (), tp->suspend.stop_signal);
2229
2230 /* Wait for it to stop (if not standalone)
2231 and in any case decode why it stopped, and act accordingly. */
2232 /* Do this only if we are not using the event loop, or if the target
2233 does not support asynchronous execution. */
2234 if (!target_can_async_p ())
2235 {
2236 wait_for_inferior ();
2237 normal_stop ();
2238 }
2239 }
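/* A minimal usage sketch, assuming the inferior is already stopped and
   we simply want to continue it from where it stopped, letting it see
   whatever signal it stopped with (this mirrors the convention stated
   above: clear_proceed_status first, then set any step_... state, then
   call proceed):

     clear_proceed_status ();
     proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
*/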
2240 \f
2241
2242 /* Start remote-debugging of a machine over a serial link. */
2243
2244 void
2245 start_remote (int from_tty)
2246 {
2247 struct inferior *inferior;
2248
2249 init_wait_for_inferior ();
2250 inferior = current_inferior ();
2251 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2252
2253 /* Always go on waiting for the target, regardless of the mode. */
2254 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2255 indicate to wait_for_inferior that a target should timeout if
2256 nothing is returned (instead of just blocking). Because of this,
2257 targets expecting an immediate response need to, internally, set
2258 things up so that the target_wait() is forced to eventually
2259 timeout. */
2260 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2261 differentiate to its caller what the state of the target is after
2262 the initial open has been performed. Here we're assuming that
2263 the target has stopped. It should be possible to eventually have
2264 target_open() return to the caller an indication that the target
2265 is currently running and GDB state should be set to the same as
2266 for an async run. */
2267 wait_for_inferior ();
2268
2269 /* Now that the inferior has stopped, do any bookkeeping like
2270 loading shared libraries. We want to do this before normal_stop,
2271 so that the displayed frame is up to date. */
2272 post_create_inferior (&current_target, from_tty);
2273
2274 normal_stop ();
2275 }
2276
2277 /* Initialize static vars when a new inferior begins. */
2278
2279 void
2280 init_wait_for_inferior (void)
2281 {
2282 /* These are meaningless until the first time through wait_for_inferior. */
2283
2284 breakpoint_init_inferior (inf_starting);
2285
2286 clear_proceed_status ();
2287
2288 stepping_past_singlestep_breakpoint = 0;
2289 deferred_step_ptid = null_ptid;
2290
2291 target_last_wait_ptid = minus_one_ptid;
2292
2293 previous_inferior_ptid = inferior_ptid;
2294 init_infwait_state ();
2295
2296 /* Discard any skipped inlined frames. */
2297 clear_inline_frame_state (minus_one_ptid);
2298 }
2299
2300 \f
2301 /* This enum encodes possible reasons for doing a target_wait, so that
2302 wfi can call target_wait in one place. (Ultimately the call will be
2303 moved out of the infinite loop entirely.) */
2304
2305 enum infwait_states
2306 {
2307 infwait_normal_state,
2308 infwait_thread_hop_state,
2309 infwait_step_watch_state,
2310 infwait_nonstep_watch_state
2311 };
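/* Roughly, and judging from the uses in handle_inferior_event below:
   infwait_normal_state is the ordinary case; infwait_thread_hop_state
   means the last resume was part of a "thread hop", letting one thread
   get past a breakpoint; infwait_step_watch_state and
   infwait_nonstep_watch_state mean the last resume (with and without a
   step, respectively) was done only to let the instruction that
   triggered a watchpoint complete, so the next stop is handled with
   stepped_after_stopped_by_watchpoint set. */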
2312
2313 /* The PTID we'll do a target_wait on. */
2314 ptid_t waiton_ptid;
2315
2316 /* Current inferior wait state. */
2317 enum infwait_states infwait_state;
2318
2319 /* Data to be passed around while handling an event. This data is
2320 discarded between events. */
2321 struct execution_control_state
2322 {
2323 ptid_t ptid;
2324 /* The thread that got the event, if this was a thread event; NULL
2325 otherwise. */
2326 struct thread_info *event_thread;
2327
2328 struct target_waitstatus ws;
2329 int random_signal;
2330 CORE_ADDR stop_func_start;
2331 CORE_ADDR stop_func_end;
2332 char *stop_func_name;
2333 int new_thread_event;
2334 int wait_some_more;
2335 };
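/* Notes on the fields above that lack comments: WS is the wait status
   returned by target_wait; RANDOM_SIGNAL is set when the signal that
   stopped the inferior is not explained by a breakpoint or by the
   stepping currently in progress; STOP_FUNC_START, STOP_FUNC_END and
   STOP_FUNC_NAME describe the function containing the stop PC;
   NEW_THREAD_EVENT records that the event came from a thread not
   previously in the thread list; WAIT_SOME_MORE tells the callers of
   handle_inferior_event to wait for another event rather than finish
   the command. */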
2336
2337 static void handle_inferior_event (struct execution_control_state *ecs);
2338
2339 static void handle_step_into_function (struct gdbarch *gdbarch,
2340 struct execution_control_state *ecs);
2341 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2342 struct execution_control_state *ecs);
2343 static void check_exception_resume (struct execution_control_state *,
2344 struct frame_info *, struct symbol *);
2345
2346 static void stop_stepping (struct execution_control_state *ecs);
2347 static void prepare_to_wait (struct execution_control_state *ecs);
2348 static void keep_going (struct execution_control_state *ecs);
2349
2350 /* Callback for iterate over threads. If the thread is stopped, but
2351 the user/frontend doesn't know about that yet, go through
2352 normal_stop, as if the thread had just stopped now. ARG points at
2353 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2354 ptid_is_pid(PTID) is true, applies to all threads of the process
2355 pointed at by PTID. Otherwise, apply only to the thread pointed
2356 at by PTID. */
2357
2358 static int
2359 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2360 {
2361 ptid_t ptid = * (ptid_t *) arg;
2362
2363 if ((ptid_equal (info->ptid, ptid)
2364 || ptid_equal (minus_one_ptid, ptid)
2365 || (ptid_is_pid (ptid)
2366 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2367 && is_running (info->ptid)
2368 && !is_executing (info->ptid))
2369 {
2370 struct cleanup *old_chain;
2371 struct execution_control_state ecss;
2372 struct execution_control_state *ecs = &ecss;
2373
2374 memset (ecs, 0, sizeof (*ecs));
2375
2376 old_chain = make_cleanup_restore_current_thread ();
2377
2378 switch_to_thread (info->ptid);
2379
2380 /* Go through handle_inferior_event/normal_stop, so we always
2381 have consistent output as if the stop event had been
2382 reported. */
2383 ecs->ptid = info->ptid;
2384 ecs->event_thread = find_thread_ptid (info->ptid);
2385 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2386 ecs->ws.value.sig = TARGET_SIGNAL_0;
2387
2388 handle_inferior_event (ecs);
2389
2390 if (!ecs->wait_some_more)
2391 {
2392 struct thread_info *tp;
2393
2394 normal_stop ();
2395
2396 /* Finish off the continuations. */
2397 tp = inferior_thread ();
2398 do_all_intermediate_continuations_thread (tp, 1);
2399 do_all_continuations_thread (tp, 1);
2400 }
2401
2402 do_cleanups (old_chain);
2403 }
2404
2405 return 0;
2406 }
2407
2408 /* This function is attached as a "thread_stop_requested" observer.
2409 Cleanup local state that assumed the PTID was to be resumed, and
2410 report the stop to the frontend. */
2411
2412 static void
2413 infrun_thread_stop_requested (ptid_t ptid)
2414 {
2415 struct displaced_step_inferior_state *displaced;
2416
2417 /* PTID was requested to stop. Remove it from the displaced
2418 stepping queue, so we don't try to resume it automatically. */
2419
2420 for (displaced = displaced_step_inferior_states;
2421 displaced;
2422 displaced = displaced->next)
2423 {
2424 struct displaced_step_request *it, **prev_next_p;
2425
2426 it = displaced->step_request_queue;
2427 prev_next_p = &displaced->step_request_queue;
2428 while (it)
2429 {
2430 if (ptid_match (it->ptid, ptid))
2431 {
2432 *prev_next_p = it->next;
2433 it->next = NULL;
2434 xfree (it);
2435 }
2436 else
2437 {
2438 prev_next_p = &it->next;
2439 }
2440
2441 it = *prev_next_p;
2442 }
2443 }
2444
2445 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2446 }
2447
2448 static void
2449 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2450 {
2451 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2452 nullify_last_target_wait_ptid ();
2453 }
2454
2455 /* Callback for iterate_over_threads. */
2456
2457 static int
2458 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2459 {
2460 if (is_exited (info->ptid))
2461 return 0;
2462
2463 delete_step_resume_breakpoint (info);
2464 delete_exception_resume_breakpoint (info);
2465 return 0;
2466 }
2467
2468 /* In all-stop, delete the step resume breakpoint of any thread that
2469 had one. In non-stop, delete the step resume breakpoint of the
2470 thread that just stopped. */
2471
2472 static void
2473 delete_step_thread_step_resume_breakpoint (void)
2474 {
2475 if (!target_has_execution
2476 || ptid_equal (inferior_ptid, null_ptid))
2477 /* If the inferior has exited, we have already deleted the step
2478 resume breakpoints out of GDB's lists. */
2479 return;
2480
2481 if (non_stop)
2482 {
2483 /* If in non-stop mode, only delete the step-resume or
2484 longjmp-resume breakpoint of the thread that just stopped
2485 stepping. */
2486 struct thread_info *tp = inferior_thread ();
2487
2488 delete_step_resume_breakpoint (tp);
2489 delete_exception_resume_breakpoint (tp);
2490 }
2491 else
2492 /* In all-stop mode, delete all step-resume and longjmp-resume
2493 breakpoints of any thread that had them. */
2494 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2495 }
2496
2497 /* A cleanup wrapper. */
2498
2499 static void
2500 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2501 {
2502 delete_step_thread_step_resume_breakpoint ();
2503 }
2504
2505 /* Pretty print the results of target_wait, for debugging purposes. */
2506
2507 static void
2508 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2509 const struct target_waitstatus *ws)
2510 {
2511 char *status_string = target_waitstatus_to_string (ws);
2512 struct ui_file *tmp_stream = mem_fileopen ();
2513 char *text;
2514
2515 /* The text is split over several lines because it was getting too long.
2516 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2517 output as a unit; we want only one timestamp printed if debug_timestamp
2518 is set. */
2519
2520 fprintf_unfiltered (tmp_stream,
2521 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2522 if (PIDGET (waiton_ptid) != -1)
2523 fprintf_unfiltered (tmp_stream,
2524 " [%s]", target_pid_to_str (waiton_ptid));
2525 fprintf_unfiltered (tmp_stream, ", status) =\n");
2526 fprintf_unfiltered (tmp_stream,
2527 "infrun: %d [%s],\n",
2528 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2529 fprintf_unfiltered (tmp_stream,
2530 "infrun: %s\n",
2531 status_string);
2532
2533 text = ui_file_xstrdup (tmp_stream, NULL);
2534
2535 /* This uses %s in part to handle %'s in the text, but also to avoid
2536 a gcc error: the format attribute requires a string literal. */
2537 fprintf_unfiltered (gdb_stdlog, "%s", text);
2538
2539 xfree (status_string);
2540 xfree (text);
2541 ui_file_delete (tmp_stream);
2542 }
2543
2544 /* Prepare and stabilize the inferior for detaching it. E.g.,
2545 detaching while a thread is displaced stepping is a recipe for
2546 crashing it, as nothing would readjust the PC out of the scratch
2547 pad. */
2548
2549 void
2550 prepare_for_detach (void)
2551 {
2552 struct inferior *inf = current_inferior ();
2553 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2554 struct cleanup *old_chain_1;
2555 struct displaced_step_inferior_state *displaced;
2556
2557 displaced = get_displaced_stepping_state (inf->pid);
2558
2559 /* Is any thread of this process displaced stepping? If not,
2560 there's nothing else to do. */
2561 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2562 return;
2563
2564 if (debug_infrun)
2565 fprintf_unfiltered (gdb_stdlog,
2566 "displaced-stepping in-process while detaching");
2567
2568 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2569 inf->detaching = 1;
2570
2571 while (!ptid_equal (displaced->step_ptid, null_ptid))
2572 {
2573 struct cleanup *old_chain_2;
2574 struct execution_control_state ecss;
2575 struct execution_control_state *ecs;
2576
2577 ecs = &ecss;
2578 memset (ecs, 0, sizeof (*ecs));
2579
2580 overlay_cache_invalid = 1;
2581
2582 /* We have to invalidate the registers BEFORE calling
2583 target_wait because they can be loaded from the target while
2584 in target_wait. This makes remote debugging a bit more
2585 efficient for those targets that provide critical registers
2586 as part of their normal status mechanism. */
2587
2588 registers_changed ();
2589
2590 if (deprecated_target_wait_hook)
2591 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2592 else
2593 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2594
2595 if (debug_infrun)
2596 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2597
2598 /* If an error happens while handling the event, propagate GDB's
2599 knowledge of the executing state to the frontend/user running
2600 state. */
2601 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2602 &minus_one_ptid);
2603
2604 /* In non-stop mode, each thread is handled individually.
2605 Switch early, so the global state is set correctly for this
2606 thread. */
2607 if (non_stop
2608 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2609 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2610 context_switch (ecs->ptid);
2611
2612 /* Now figure out what to do with the result of the wait. */
2613 handle_inferior_event (ecs);
2614
2615 /* No error, don't finish the state yet. */
2616 discard_cleanups (old_chain_2);
2617
2618 /* Breakpoints and watchpoints are not installed on the target
2619 at this point, and signals are passed directly to the
2620 inferior, so this must mean the process is gone. */
2621 if (!ecs->wait_some_more)
2622 {
2623 discard_cleanups (old_chain_1);
2624 error (_("Program exited while detaching"));
2625 }
2626 }
2627
2628 discard_cleanups (old_chain_1);
2629 }
2630
2631 /* Wait for control to return from inferior to debugger.
2632
2633 If inferior gets a signal, we may decide to start it up again
2634 instead of returning. That is why there is a loop in this function.
2635 When this function actually returns it means the inferior
2636 should be left stopped and GDB should read more commands. */
2637
2638 void
2639 wait_for_inferior (void)
2640 {
2641 struct cleanup *old_cleanups;
2642 struct execution_control_state ecss;
2643 struct execution_control_state *ecs;
2644
2645 if (debug_infrun)
2646 fprintf_unfiltered
2647 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2648
2649 old_cleanups =
2650 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2651
2652 ecs = &ecss;
2653 memset (ecs, 0, sizeof (*ecs));
2654
2655 while (1)
2656 {
2657 struct cleanup *old_chain;
2658
2659 /* We have to invalidate the registers BEFORE calling target_wait
2660 because they can be loaded from the target while in target_wait.
2661 This makes remote debugging a bit more efficient for those
2662 targets that provide critical registers as part of their normal
2663 status mechanism. */
2664
2665 overlay_cache_invalid = 1;
2666 registers_changed ();
2667
2668 if (deprecated_target_wait_hook)
2669 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2670 else
2671 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2672
2673 if (debug_infrun)
2674 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2675
2676 /* If an error happens while handling the event, propagate GDB's
2677 knowledge of the executing state to the frontend/user running
2678 state. */
2679 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2680
2681 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2682 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2683 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2684
2685 /* Now figure out what to do with the result of the wait. */
2686 handle_inferior_event (ecs);
2687
2688 /* No error, don't finish the state yet. */
2689 discard_cleanups (old_chain);
2690
2691 if (!ecs->wait_some_more)
2692 break;
2693 }
2694
2695 do_cleanups (old_cleanups);
2696 }
2697
2698 /* Asynchronous version of wait_for_inferior. It is called by the
2699 event loop whenever a change of state is detected on the file
2700 descriptor corresponding to the target. It can be called more than
2701 once to complete a single execution command. In such cases we need
2702 to keep the state in a global variable ECSS. If it is the last time
2703 that this function is called for a single execution command, then
2704 report to the user that the inferior has stopped, and do the
2705 necessary cleanups. */
2706
2707 void
2708 fetch_inferior_event (void *client_data)
2709 {
2710 struct execution_control_state ecss;
2711 struct execution_control_state *ecs = &ecss;
2712 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2713 struct cleanup *ts_old_chain;
2714 int was_sync = sync_execution;
2715
2716 memset (ecs, 0, sizeof (*ecs));
2717
2718 /* We're handling a live event, so make sure we're doing live
2719 debugging. If we're looking at traceframes while the target is
2720 running, we're going to need to get back to that mode after
2721 handling the event. */
2722 if (non_stop)
2723 {
2724 make_cleanup_restore_current_traceframe ();
2725 set_current_traceframe (-1);
2726 }
2727
2728 if (non_stop)
2729 /* In non-stop mode, the user/frontend should not notice a thread
2730 switch due to internal events. Make sure we reverse to the
2731 user selected thread and frame after handling the event and
2732 running any breakpoint commands. */
2733 make_cleanup_restore_current_thread ();
2734
2735 /* We have to invalidate the registers BEFORE calling target_wait
2736 because they can be loaded from the target while in target_wait.
2737 This makes remote debugging a bit more efficient for those
2738 targets that provide critical registers as part of their normal
2739 status mechanism. */
2740
2741 overlay_cache_invalid = 1;
2742 registers_changed ();
2743
2744 make_cleanup_restore_integer (&execution_direction);
2745 execution_direction = target_execution_direction ();
2746
2747 if (deprecated_target_wait_hook)
2748 ecs->ptid =
2749 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2750 else
2751 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2752
2753 if (debug_infrun)
2754 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2755
2756 if (non_stop
2757 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2758 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2759 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2760 /* In non-stop mode, each thread is handled individually. Switch
2761 early, so the global state is set correctly for this
2762 thread. */
2763 context_switch (ecs->ptid);
2764
2765 /* If an error happens while handling the event, propagate GDB's
2766 knowledge of the executing state to the frontend/user running
2767 state. */
2768 if (!non_stop)
2769 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2770 else
2771 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2772
2773 /* Now figure out what to do with the result of the wait. */
2774 handle_inferior_event (ecs);
2775
2776 if (!ecs->wait_some_more)
2777 {
2778 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2779
2780 delete_step_thread_step_resume_breakpoint ();
2781
2782 /* We may not find an inferior if this was a process exit. */
2783 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2784 normal_stop ();
2785
2786 if (target_has_execution
2787 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2788 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2789 && ecs->event_thread->step_multi
2790 && ecs->event_thread->control.stop_step)
2791 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2792 else
2793 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2794 }
2795
2796 /* No error, don't finish the thread states yet. */
2797 discard_cleanups (ts_old_chain);
2798
2799 /* Revert thread and frame. */
2800 do_cleanups (old_chain);
2801
2802 /* If the inferior was in sync execution mode, and now isn't,
2803 restore the prompt. */
2804 if (was_sync && !sync_execution)
2805 display_gdb_prompt (0);
2806 }
2807
2808 /* Record the frame and location we're currently stepping through. */
2809 void
2810 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2811 {
2812 struct thread_info *tp = inferior_thread ();
2813
2814 tp->control.step_frame_id = get_frame_id (frame);
2815 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2816
2817 tp->current_symtab = sal.symtab;
2818 tp->current_line = sal.line;
2819 }
2820
2821 /* Clear context switchable stepping state. */
2822
2823 void
2824 init_thread_stepping_state (struct thread_info *tss)
2825 {
2826 tss->stepping_over_breakpoint = 0;
2827 tss->step_after_step_resume_breakpoint = 0;
2828 tss->stepping_through_solib_after_catch = 0;
2829 tss->stepping_through_solib_catchpoints = NULL;
2830 }
2831
2832 /* Return the cached copy of the last pid/waitstatus returned by
2833 target_wait()/deprecated_target_wait_hook(). The data is actually
2834 cached by handle_inferior_event(), which gets called immediately
2835 after target_wait()/deprecated_target_wait_hook(). */
2836
2837 void
2838 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2839 {
2840 *ptidp = target_last_wait_ptid;
2841 *status = target_last_waitstatus;
2842 }
2843
2844 void
2845 nullify_last_target_wait_ptid (void)
2846 {
2847 target_last_wait_ptid = minus_one_ptid;
2848 }
2849
2850 /* Switch thread contexts. */
2851
2852 static void
2853 context_switch (ptid_t ptid)
2854 {
2855 if (debug_infrun)
2856 {
2857 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2858 target_pid_to_str (inferior_ptid));
2859 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2860 target_pid_to_str (ptid));
2861 }
2862
2863 switch_to_thread (ptid);
2864 }
2865
2866 static void
2867 adjust_pc_after_break (struct execution_control_state *ecs)
2868 {
2869 struct regcache *regcache;
2870 struct gdbarch *gdbarch;
2871 struct address_space *aspace;
2872 CORE_ADDR breakpoint_pc;
2873
2874 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2875 we aren't, just return.
2876
2877 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2878 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2879 implemented by software breakpoints should be handled through the normal
2880 breakpoint layer.
2881
2882 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2883 different signals (SIGILL or SIGEMT for instance), but it is less
2884 clear where the PC is pointing afterwards. It may not match
2885 gdbarch_decr_pc_after_break. I don't know any specific target that
2886 generates these signals at breakpoints (the code has been in GDB since at
2887 least 1992) so I can not guess how to handle them here.
2888
2889 In earlier versions of GDB, a target with
2890 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2891 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2892 target with both of these set in GDB history, and it seems unlikely to be
2893 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2894
2895 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2896 return;
2897
2898 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2899 return;
2900
2901 /* In reverse execution, when a breakpoint is hit, the instruction
2902 under it has already been de-executed. The reported PC always
2903 points at the breakpoint address, so adjusting it further would
2904 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2905 architecture:
2906
2907 B1 0x08000000 : INSN1
2908 B2 0x08000001 : INSN2
2909 0x08000002 : INSN3
2910 PC -> 0x08000003 : INSN4
2911
2912 Say you're stopped at 0x08000003 as above. Reverse continuing
2913 from that point should hit B2 as below. Reading the PC when the
2914 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2915 been de-executed already.
2916
2917 B1 0x08000000 : INSN1
2918 B2 PC -> 0x08000001 : INSN2
2919 0x08000002 : INSN3
2920 0x08000003 : INSN4
2921
2922 We can't apply the same logic as for forward execution, because
2923 we would wrongly adjust the PC to 0x08000000, since there's a
2924 breakpoint at PC - 1. We'd then report a hit on B1, although
2925 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2926 behaviour. */
2927 if (execution_direction == EXEC_REVERSE)
2928 return;
2929
2930 /* If this target does not decrement the PC after breakpoints, then
2931 we have nothing to do. */
2932 regcache = get_thread_regcache (ecs->ptid);
2933 gdbarch = get_regcache_arch (regcache);
2934 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2935 return;
2936
2937 aspace = get_regcache_aspace (regcache);
2938
2939 /* Find the location where (if we've hit a breakpoint) the
2940 breakpoint would be. */
2941 breakpoint_pc = regcache_read_pc (regcache)
2942 - gdbarch_decr_pc_after_break (gdbarch);
2943
2944 /* Check whether there actually is a software breakpoint inserted at
2945 that location.
2946
2947 If in non-stop mode, a race condition is possible where we've
2948 removed a breakpoint, but stop events for that breakpoint were
2949 already queued and arrive later. To suppress those spurious
2950 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2951 and retire them after a number of stop events are reported. */
2952 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2953 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2954 {
2955 struct cleanup *old_cleanups = NULL;
2956
2957 if (RECORD_IS_USED)
2958 old_cleanups = record_gdb_operation_disable_set ();
2959
2960 /* When using hardware single-step, a SIGTRAP is reported for both
2961 a completed single-step and a software breakpoint. Need to
2962 differentiate between the two, as the latter needs adjusting
2963 but the former does not.
2964
2965 The SIGTRAP can be due to a completed hardware single-step only if
2966 - we didn't insert software single-step breakpoints
2967 - the thread to be examined is still the current thread
2968 - this thread is currently being stepped
2969
2970 If any of these events did not occur, we must have stopped due
2971 to hitting a software breakpoint, and have to back up to the
2972 breakpoint address.
2973
2974 As a special case, we could have hardware single-stepped a
2975 software breakpoint. In this case (prev_pc == breakpoint_pc),
2976 we also need to back up to the breakpoint address. */
2977
2978 if (singlestep_breakpoints_inserted_p
2979 || !ptid_equal (ecs->ptid, inferior_ptid)
2980 || !currently_stepping (ecs->event_thread)
2981 || ecs->event_thread->prev_pc == breakpoint_pc)
2982 regcache_write_pc (regcache, breakpoint_pc);
2983
2984 if (RECORD_IS_USED)
2985 do_cleanups (old_cleanups);
2986 }
2987 }
2988
2989 void
2990 init_infwait_state (void)
2991 {
2992 waiton_ptid = pid_to_ptid (-1);
2993 infwait_state = infwait_normal_state;
2994 }
2995
2996 void
2997 error_is_running (void)
2998 {
2999 error (_("Cannot execute this command while "
3000 "the selected thread is running."));
3001 }
3002
3003 void
3004 ensure_not_running (void)
3005 {
3006 if (is_running (inferior_ptid))
3007 error_is_running ();
3008 }
3009
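/* Return non-zero if FRAME was entered, directly or through one or
   more inline frames, from the frame whose id is STEP_FRAME_ID. Only
   inline callers are walked through; the first non-inline caller ends
   the search. */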
3010 static int
3011 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3012 {
3013 for (frame = get_prev_frame (frame);
3014 frame != NULL;
3015 frame = get_prev_frame (frame))
3016 {
3017 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3018 return 1;
3019 if (get_frame_type (frame) != INLINE_FRAME)
3020 break;
3021 }
3022
3023 return 0;
3024 }
3025
3026 /* Auxiliary function that handles syscall entry/return events.
3027 It returns 1 if the inferior should keep going (and GDB
3028 should ignore the event), or 0 if the event deserves to be
3029 processed. */
3030
3031 static int
3032 handle_syscall_event (struct execution_control_state *ecs)
3033 {
3034 struct regcache *regcache;
3035 struct gdbarch *gdbarch;
3036 int syscall_number;
3037
3038 if (!ptid_equal (ecs->ptid, inferior_ptid))
3039 context_switch (ecs->ptid);
3040
3041 regcache = get_thread_regcache (ecs->ptid);
3042 gdbarch = get_regcache_arch (regcache);
3043 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
3044 stop_pc = regcache_read_pc (regcache);
3045
3046 target_last_waitstatus.value.syscall_number = syscall_number;
3047
3048 if (catch_syscall_enabled () > 0
3049 && catching_syscall_number (syscall_number) > 0)
3050 {
3051 if (debug_infrun)
3052 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3053 syscall_number);
3054
3055 ecs->event_thread->control.stop_bpstat
3056 = bpstat_stop_status (get_regcache_aspace (regcache),
3057 stop_pc, ecs->ptid);
3058 ecs->random_signal
3059 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3060
3061 if (!ecs->random_signal)
3062 {
3063 /* Catchpoint hit. */
3064 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3065 return 0;
3066 }
3067 }
3068
3069 /* If no catchpoint triggered for this, then keep going. */
3070 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3071 keep_going (ecs);
3072 return 1;
3073 }
3074
3075 /* Given an execution control state that has been freshly filled in
3076 by an event from the inferior, figure out what it means and take
3077 appropriate action. */
3078
3079 static void
3080 handle_inferior_event (struct execution_control_state *ecs)
3081 {
3082 struct frame_info *frame;
3083 struct gdbarch *gdbarch;
3084 int sw_single_step_trap_p = 0;
3085 int stopped_by_watchpoint;
3086 int stepped_after_stopped_by_watchpoint = 0;
3087 struct symtab_and_line stop_pc_sal;
3088 enum stop_kind stop_soon;
3089
3090 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3091 {
3092 /* We had an event in the inferior, but we are not interested in
3093 handling it at this level. The lower layers have already
3094 done what needs to be done, if anything.
3095
3096 One of the possible circumstances for this is when the
3097 inferior produces output for the console. The inferior has
3098 not stopped, and we are ignoring the event. Another possible
3099 circumstance is any event which the lower level knows will be
3100 reported multiple times without an intervening resume. */
3101 if (debug_infrun)
3102 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3103 prepare_to_wait (ecs);
3104 return;
3105 }
3106
3107 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3108 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3109 {
3110 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3111
3112 gdb_assert (inf);
3113 stop_soon = inf->control.stop_soon;
3114 }
3115 else
3116 stop_soon = NO_STOP_QUIETLY;
3117
3118 /* Cache the last pid/waitstatus. */
3119 target_last_wait_ptid = ecs->ptid;
3120 target_last_waitstatus = ecs->ws;
3121
3122 /* Always clear state belonging to the previous time we stopped. */
3123 stop_stack_dummy = STOP_NONE;
3124
3125 /* If it's a new process, add it to the thread database. */
3126
3127 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
3128 && !ptid_equal (ecs->ptid, minus_one_ptid)
3129 && !in_thread_list (ecs->ptid));
3130
3131 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3132 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
3133 add_thread (ecs->ptid);
3134
3135 ecs->event_thread = find_thread_ptid (ecs->ptid);
3136
3137 /* Dependent on valid ECS->EVENT_THREAD. */
3138 adjust_pc_after_break (ecs);
3139
3140 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3141 reinit_frame_cache ();
3142
3143 breakpoint_retire_moribund ();
3144
3145 /* First, distinguish signals caused by the debugger from signals
3146 that have to do with the program's own actions. Note that
3147 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3148 on the operating system version. Here we detect when a SIGILL or
3149 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3150 something similar for SIGSEGV, since a SIGSEGV will be generated
3151 when we're trying to execute a breakpoint instruction on a
3152 non-executable stack. This happens for call dummy breakpoints
3153 for architectures like SPARC that place call dummies on the
3154 stack. */
3155 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3156 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3157 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3158 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3159 {
3160 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3161
3162 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3163 regcache_read_pc (regcache)))
3164 {
3165 if (debug_infrun)
3166 fprintf_unfiltered (gdb_stdlog,
3167 "infrun: Treating signal as SIGTRAP\n");
3168 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3169 }
3170 }
3171
3172 /* Mark the non-executing threads accordingly. In all-stop, all
3173 threads of all processes are stopped when we get any event
3174 reported. In non-stop mode, only the event thread stops. If
3175 we're handling a process exit in non-stop mode, there's nothing
3176 to do, as threads of the dead process are gone, and threads of
3177 any other process were left running. */
3178 if (!non_stop)
3179 set_executing (minus_one_ptid, 0);
3180 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3181 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3182 set_executing (inferior_ptid, 0);
3183
3184 switch (infwait_state)
3185 {
3186 case infwait_thread_hop_state:
3187 if (debug_infrun)
3188 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3189 break;
3190
3191 case infwait_normal_state:
3192 if (debug_infrun)
3193 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3194 break;
3195
3196 case infwait_step_watch_state:
3197 if (debug_infrun)
3198 fprintf_unfiltered (gdb_stdlog,
3199 "infrun: infwait_step_watch_state\n");
3200
3201 stepped_after_stopped_by_watchpoint = 1;
3202 break;
3203
3204 case infwait_nonstep_watch_state:
3205 if (debug_infrun)
3206 fprintf_unfiltered (gdb_stdlog,
3207 "infrun: infwait_nonstep_watch_state\n");
3208 insert_breakpoints ();
3209
3210 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3211 handle things like signals arriving and other things happening
3212 in combination correctly? */
3213 stepped_after_stopped_by_watchpoint = 1;
3214 break;
3215
3216 default:
3217 internal_error (__FILE__, __LINE__, _("bad switch"));
3218 }
3219
3220 infwait_state = infwait_normal_state;
3221 waiton_ptid = pid_to_ptid (-1);
3222
3223 switch (ecs->ws.kind)
3224 {
3225 case TARGET_WAITKIND_LOADED:
3226 if (debug_infrun)
3227 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3228 /* Ignore gracefully during startup of the inferior, as it might
3229 be the shell which has just loaded some objects, otherwise
3230 add the symbols for the newly loaded objects. Also ignore at
3231 the beginning of an attach or remote session; we will query
3232 the full list of libraries once the connection is
3233 established. */
3234 if (stop_soon == NO_STOP_QUIETLY)
3235 {
3236 /* Check for any newly added shared libraries if we're
3237 supposed to be adding them automatically. Switch
3238 terminal for any messages produced by
3239 breakpoint_re_set. */
3240 target_terminal_ours_for_output ();
3241 /* NOTE: cagney/2003-11-25: Make certain that the target
3242 stack's section table is kept up-to-date. Architectures,
3243 (e.g., PPC64), use the section table to perform
3244 operations such as address => section name and hence
3245 require the table to contain all sections (including
3246 those found in shared libraries). */
3247 #ifdef SOLIB_ADD
3248 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3249 #else
3250 solib_add (NULL, 0, &current_target, auto_solib_add);
3251 #endif
3252 target_terminal_inferior ();
3253
3254 /* If requested, stop when the dynamic linker notifies
3255 gdb of events. This allows the user to get control
3256 and place breakpoints in initializer routines for
3257 dynamically loaded objects (among other things). */
3258 if (stop_on_solib_events)
3259 {
3260 /* Make sure we print "Stopped due to solib-event" in
3261 normal_stop. */
3262 stop_print_frame = 1;
3263
3264 stop_stepping (ecs);
3265 return;
3266 }
3267
3268 /* NOTE drow/2007-05-11: This might be a good place to check
3269 for "catch load". */
3270 }
3271
3272 /* If we are skipping through a shell, or through shared library
3273 loading that we aren't interested in, resume the program. If
3274 we're running the program normally, also resume. But stop if
3275 we're attaching or setting up a remote connection. */
3276 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3277 {
3278 /* Loading of shared libraries might have changed breakpoint
3279 addresses. Make sure new breakpoints are inserted. */
3280 if (stop_soon == NO_STOP_QUIETLY
3281 && !breakpoints_always_inserted_mode ())
3282 insert_breakpoints ();
3283 resume (0, TARGET_SIGNAL_0);
3284 prepare_to_wait (ecs);
3285 return;
3286 }
3287
3288 break;
3289
3290 case TARGET_WAITKIND_SPURIOUS:
3291 if (debug_infrun)
3292 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3293 resume (0, TARGET_SIGNAL_0);
3294 prepare_to_wait (ecs);
3295 return;
3296
3297 case TARGET_WAITKIND_EXITED:
3298 if (debug_infrun)
3299 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3300 inferior_ptid = ecs->ptid;
3301 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3302 set_current_program_space (current_inferior ()->pspace);
3303 handle_vfork_child_exec_or_exit (0);
3304 target_terminal_ours (); /* Must do this before mourn anyway. */
3305 print_exited_reason (ecs->ws.value.integer);
3306
3307 /* Record the exit code in the convenience variable $_exitcode, so
3308 that the user can inspect this again later. */
3309 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3310 (LONGEST) ecs->ws.value.integer);
3311
3312 /* Also record this in the inferior itself. */
3313 current_inferior ()->has_exit_code = 1;
3314 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3315
3316 gdb_flush (gdb_stdout);
3317 target_mourn_inferior ();
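/* The inferior is gone, so simply forget about any software
   single-step breakpoints that may have been planted in it.  */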
3318 singlestep_breakpoints_inserted_p = 0;
3319 cancel_single_step_breakpoints ();
3320 stop_print_frame = 0;
3321 stop_stepping (ecs);
3322 return;
3323
3324 case TARGET_WAITKIND_SIGNALLED:
3325 if (debug_infrun)
3326 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3327 inferior_ptid = ecs->ptid;
3328 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3329 set_current_program_space (current_inferior ()->pspace);
3330 handle_vfork_child_exec_or_exit (0);
3331 stop_print_frame = 0;
3332 target_terminal_ours (); /* Must do this before mourn anyway. */
3333
3334 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3335 reach here unless the inferior is dead. However, for years
3336 target_kill() was called here, which hints that fatal signals aren't
3337 really fatal on some systems. If that's true, then some changes
3338 may be needed. */
3339 target_mourn_inferior ();
3340
3341 print_signal_exited_reason (ecs->ws.value.sig);
3342 singlestep_breakpoints_inserted_p = 0;
3343 cancel_single_step_breakpoints ();
3344 stop_stepping (ecs);
3345 return;
3346
3347 /* The following are the only cases in which we keep going;
3348 the above cases end in a return or break. */
3349 case TARGET_WAITKIND_FORKED:
3350 case TARGET_WAITKIND_VFORKED:
3351 if (debug_infrun)
3352 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3353
3354 if (!ptid_equal (ecs->ptid, inferior_ptid))
3355 {
3356 context_switch (ecs->ptid);
3357 reinit_frame_cache ();
3358 }
3359
3360 /* Immediately detach breakpoints from the child before there's
3361 any chance of letting the user delete breakpoints from the
3362 breakpoint lists. If we don't do this early, it's easy to
3363 leave left-over traps in the child, viz: "break foo; catch
3364 fork; c; <fork>; del; c; <child calls foo>". We only follow
3365 the fork on the last `continue', and by that time the
3366 breakpoint at "foo" is long gone from the breakpoint table.
3367 If we vforked, then we don't need to unpatch here, since both
3368 parent and child are sharing the same memory pages; we'll
3369 need to unpatch at follow/detach time instead to be certain
3370 that new breakpoints added between catchpoint hit time and
3371 vfork follow are detached. */
3372 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3373 {
3374 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3375
3376 /* This won't actually modify the breakpoint list, but will
3377 physically remove the breakpoints from the child. */
3378 detach_breakpoints (child_pid);
3379 }
3380
3381 if (singlestep_breakpoints_inserted_p)
3382 {
3383 /* Pull the single step breakpoints out of the target. */
3384 remove_single_step_breakpoints ();
3385 singlestep_breakpoints_inserted_p = 0;
3386 }
3387
3388 /* In case the event is caught by a catchpoint, remember that
3389 the event is to be followed at the next resume of the thread,
3390 and not immediately. */
3391 ecs->event_thread->pending_follow = ecs->ws;
3392
3393 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3394
3395 ecs->event_thread->control.stop_bpstat
3396 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3397 stop_pc, ecs->ptid);
3398
3399 /* Note that we're interested in knowing whether the bpstat
3400 actually causes a stop, not just whether it may explain the signal.
3401 Software watchpoints, for example, always appear in the
3402 bpstat. */
3403 ecs->random_signal
3404 = !bpstat_causes_stop (ecs->event_thread->control.stop_bpstat);
3405
3406 /* If no catchpoint triggered for this, then keep going. */
3407 if (ecs->random_signal)
3408 {
3409 ptid_t parent;
3410 ptid_t child;
3411 int should_resume;
3412 int follow_child
3413 = (follow_fork_mode_string == follow_fork_mode_child);
3414
3415 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3416
3417 should_resume = follow_fork ();
3418
3419 parent = ecs->ptid;
3420 child = ecs->ws.value.related_pid;
3421
3422 /* In non-stop mode, also resume the other branch. */
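/* (!detach_fork means "set detach-on-fork off" is in effect, so GDB
   stays attached to both parent and child.)  */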
3423 if (non_stop && !detach_fork)
3424 {
3425 if (follow_child)
3426 switch_to_thread (parent);
3427 else
3428 switch_to_thread (child);
3429
3430 ecs->event_thread = inferior_thread ();
3431 ecs->ptid = inferior_ptid;
3432 keep_going (ecs);
3433 }
3434
3435 if (follow_child)
3436 switch_to_thread (child);
3437 else
3438 switch_to_thread (parent);
3439
3440 ecs->event_thread = inferior_thread ();
3441 ecs->ptid = inferior_ptid;
3442
3443 if (should_resume)
3444 keep_going (ecs);
3445 else
3446 stop_stepping (ecs);
3447 return;
3448 }
3449 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3450 goto process_event_stop_test;
3451
3452 case TARGET_WAITKIND_VFORK_DONE:
3453 /* Done with the shared memory region. Re-insert breakpoints in
3454 the parent, and keep going. */
3455
3456 if (debug_infrun)
3457 fprintf_unfiltered (gdb_stdlog,
3458 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3459
3460 if (!ptid_equal (ecs->ptid, inferior_ptid))
3461 context_switch (ecs->ptid);
3462
3463 current_inferior ()->waiting_for_vfork_done = 0;
3464 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3465 /* This also takes care of reinserting breakpoints in the
3466 previously locked inferior. */
3467 keep_going (ecs);
3468 return;
3469
3470 case TARGET_WAITKIND_EXECD:
3471 if (debug_infrun)
3472 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3473
3474 if (!ptid_equal (ecs->ptid, inferior_ptid))
3475 {
3476 context_switch (ecs->ptid);
3477 reinit_frame_cache ();
3478 }
3479
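/* The exec has replaced the program's address space, so any software
   single-step breakpoints we had planted are gone; just forget them.  */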
3480 singlestep_breakpoints_inserted_p = 0;
3481 cancel_single_step_breakpoints ();
3482
3483 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3484
3485 /* Do whatever is necessary to the parent branch of the vfork. */
3486 handle_vfork_child_exec_or_exit (1);
3487
3488 /* This causes the eventpoints and symbol table to be reset.
3489 Must do this now, before trying to determine whether to
3490 stop. */
3491 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3492
3493 ecs->event_thread->control.stop_bpstat
3494 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3495 stop_pc, ecs->ptid);
3496 ecs->random_signal
3497 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3498
3499 /* Note that this may be referenced from inside
3500 bpstat_stop_status above, through inferior_has_execd. */
3501 xfree (ecs->ws.value.execd_pathname);
3502 ecs->ws.value.execd_pathname = NULL;
3503
3504 /* If no catchpoint triggered for this, then keep going. */
3505 if (ecs->random_signal)
3506 {
3507 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3508 keep_going (ecs);
3509 return;
3510 }
3511 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3512 goto process_event_stop_test;
3513
3514 /* Be careful not to try to gather much state about a thread
3515 that's in a syscall. It's frequently a losing proposition. */
3516 case TARGET_WAITKIND_SYSCALL_ENTRY:
3517 if (debug_infrun)
3518 fprintf_unfiltered (gdb_stdlog,
3519 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3520 /* Getting the current syscall number. */
3521 if (handle_syscall_event (ecs) != 0)
3522 return;
3523 goto process_event_stop_test;
3524
3525 /* Before examining the threads further, step this thread to
3526 get it entirely out of the syscall. (We get notice of the
3527 event when the thread is just on the verge of exiting a
3528 syscall. Stepping one instruction seems to get it back
3529 into user code.) */
3530 case TARGET_WAITKIND_SYSCALL_RETURN:
3531 if (debug_infrun)
3532 fprintf_unfiltered (gdb_stdlog,
3533 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3534 if (handle_syscall_event (ecs) != 0)
3535 return;
3536 goto process_event_stop_test;
3537
3538 case TARGET_WAITKIND_STOPPED:
3539 if (debug_infrun)
3540 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3541 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3542 break;
3543
3544 case TARGET_WAITKIND_NO_HISTORY:
3545 /* Reverse execution: target ran out of history info. */
3546 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3547 print_no_history_reason ();
3548 stop_stepping (ecs);
3549 return;
3550 }
3551
3552 if (ecs->new_thread_event)
3553 {
3554 if (non_stop)
3555 /* Non-stop assumes that the target handles adding new threads
3556 to the thread list. */
3557 internal_error (__FILE__, __LINE__,
3558 "targets should add new threads to the thread "
3559 "list themselves in non-stop mode.");
3560
3561 /* We may want to consider not doing a resume here in order to
3562 give the user a chance to play with the new thread. It might
3563 be good to make that a user-settable option. */
3564
3565 /* At this point, all threads are stopped (happens automatically
3566 in either the OS or the native code). Therefore we need to
3567 continue all threads in order to make progress. */
3568
3569 if (!ptid_equal (ecs->ptid, inferior_ptid))
3570 context_switch (ecs->ptid);
3571 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3572 prepare_to_wait (ecs);
3573 return;
3574 }
3575
3576 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3577 {
3578 /* Do we need to clean up the state of a thread that has
3579 completed a displaced single-step? (Doing so usually affects
3580 the PC, so do it here, before we set stop_pc.) */
3581 displaced_step_fixup (ecs->ptid,
3582 ecs->event_thread->suspend.stop_signal);
3583
3584 /* If we either finished a single-step or hit a breakpoint, but
3585 the user wanted this thread to be stopped, pretend we got a
3586 SIG0 (generic unsignaled stop). */
3587
3588 if (ecs->event_thread->stop_requested
3589 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3590 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3591 }
3592
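/* Cache the PC reported for this event; most of the analysis below
   works in terms of STOP_PC.  */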
3593 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3594
3595 if (debug_infrun)
3596 {
3597 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3598 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3599 struct cleanup *old_chain = save_inferior_ptid ();
3600
3601 inferior_ptid = ecs->ptid;
3602
3603 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3604 paddress (gdbarch, stop_pc));
3605 if (target_stopped_by_watchpoint ())
3606 {
3607 CORE_ADDR addr;
3608
3609 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3610
3611 if (target_stopped_data_address (&current_target, &addr))
3612 fprintf_unfiltered (gdb_stdlog,
3613 "infrun: stopped data address = %s\n",
3614 paddress (gdbarch, addr));
3615 else
3616 fprintf_unfiltered (gdb_stdlog,
3617 "infrun: (no data address available)\n");
3618 }
3619
3620 do_cleanups (old_chain);
3621 }
3622
3623 if (stepping_past_singlestep_breakpoint)
3624 {
3625 gdb_assert (singlestep_breakpoints_inserted_p);
3626 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3627 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3628
3629 stepping_past_singlestep_breakpoint = 0;
3630
3631 /* We've either finished single-stepping past the single-step
3632 breakpoint, or stopped for some other reason. It would be nice if
3633 we could tell, but we can't reliably. */
3634 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3635 {
3636 if (debug_infrun)
3637 fprintf_unfiltered (gdb_stdlog,
3638 "infrun: stepping_past_"
3639 "singlestep_breakpoint\n");
3640 /* Pull the single step breakpoints out of the target. */
3641 remove_single_step_breakpoints ();
3642 singlestep_breakpoints_inserted_p = 0;
3643
3644 ecs->random_signal = 0;
3645 ecs->event_thread->control.trap_expected = 0;
3646
3647 context_switch (saved_singlestep_ptid);
3648 if (deprecated_context_hook)
3649 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3650
3651 resume (1, TARGET_SIGNAL_0);
3652 prepare_to_wait (ecs);
3653 return;
3654 }
3655 }
3656
3657 if (!ptid_equal (deferred_step_ptid, null_ptid))
3658 {
3659 /* In non-stop mode, there's never a deferred_step_ptid set. */
3660 gdb_assert (!non_stop);
3661
3662 /* If we stopped for some other reason than single-stepping, ignore
3663 the fact that we were supposed to switch back. */
3664 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3665 {
3666 if (debug_infrun)
3667 fprintf_unfiltered (gdb_stdlog,
3668 "infrun: handling deferred step\n");
3669
3670 /* Pull the single step breakpoints out of the target. */
3671 if (singlestep_breakpoints_inserted_p)
3672 {
3673 remove_single_step_breakpoints ();
3674 singlestep_breakpoints_inserted_p = 0;
3675 }
3676
3677 ecs->event_thread->control.trap_expected = 0;
3678
3679 /* Note: We do not call context_switch at this point, as the
3680 context is already set up for stepping the original thread. */
3681 switch_to_thread (deferred_step_ptid);
3682 deferred_step_ptid = null_ptid;
3683 /* Suppress spurious "Switching to ..." message. */
3684 previous_inferior_ptid = inferior_ptid;
3685
3686 resume (1, TARGET_SIGNAL_0);
3687 prepare_to_wait (ecs);
3688 return;
3689 }
3690
3691 deferred_step_ptid = null_ptid;
3692 }
3693
3694 /* See if a thread hit a thread-specific breakpoint that was meant for
3695 another thread. If so, then step that thread past the breakpoint,
3696 and continue it. */
3697
3698 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3699 {
3700 int thread_hop_needed = 0;
3701 struct address_space *aspace =
3702 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3703
3704 /* Check if a regular breakpoint has been hit before checking
3705 for a potential single step breakpoint. Otherwise, GDB will
3706 not see this breakpoint hit when stepping onto breakpoints. */
3707 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3708 {
3709 ecs->random_signal = 0;
3710 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3711 thread_hop_needed = 1;
3712 }
3713 else if (singlestep_breakpoints_inserted_p)
3714 {
3715 /* We have not context switched yet, so this should be true
3716 no matter which thread hit the singlestep breakpoint. */
3717 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3718 if (debug_infrun)
3719 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3720 "trap for %s\n",
3721 target_pid_to_str (ecs->ptid));
3722
3723 ecs->random_signal = 0;
3724 /* The call to in_thread_list is necessary because PTIDs sometimes
3725 change when we go from single-threaded to multi-threaded. If
3726 the singlestep_ptid is still in the list, assume that it is
3727 really different from ecs->ptid. */
3728 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3729 && in_thread_list (singlestep_ptid))
3730 {
3731 /* If the PC of the thread we were trying to single-step
3732 has changed, discard this event (which we were going
3733 to ignore anyway), and pretend we saw that thread
3734 trap. This prevents us from continuously moving the
3735 single-step breakpoint forward, one instruction at a
3736 time. If the PC has changed, then the thread we were
3737 trying to single-step has trapped or been signalled,
3738 but the event has not been reported to GDB yet.
3739
3740 There might be some cases where this loses signal
3741 information, if a signal has arrived at exactly the
3742 same time that the PC changed, but this is the best
3743 we can do with the information available. Perhaps we
3744 should arrange to report all events for all threads
3745 when they stop, or to re-poll the remote looking for
3746 this particular thread (i.e. temporarily enable
3747 schedlock). */
3748
3749 CORE_ADDR new_singlestep_pc
3750 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3751
3752 if (new_singlestep_pc != singlestep_pc)
3753 {
3754 enum target_signal stop_signal;
3755
3756 if (debug_infrun)
3757 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3758 " but expected thread advanced also\n");
3759
3760 /* The current context still belongs to
3761 singlestep_ptid. Don't swap here, since that's
3762 the context we want to use. Just fudge our
3763 state and continue. */
3764 stop_signal = ecs->event_thread->suspend.stop_signal;
3765 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3766 ecs->ptid = singlestep_ptid;
3767 ecs->event_thread = find_thread_ptid (ecs->ptid);
3768 ecs->event_thread->suspend.stop_signal = stop_signal;
3769 stop_pc = new_singlestep_pc;
3770 }
3771 else
3772 {
3773 if (debug_infrun)
3774 fprintf_unfiltered (gdb_stdlog,
3775 "infrun: unexpected thread\n");
3776
3777 thread_hop_needed = 1;
3778 stepping_past_singlestep_breakpoint = 1;
3779 saved_singlestep_ptid = singlestep_ptid;
3780 }
3781 }
3782 }
3783
3784 if (thread_hop_needed)
3785 {
3786 struct regcache *thread_regcache;
3787 int remove_status = 0;
3788
3789 if (debug_infrun)
3790 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3791
3792 /* Switch context before touching inferior memory, the
3793 previous thread may have exited. */
3794 if (!ptid_equal (inferior_ptid, ecs->ptid))
3795 context_switch (ecs->ptid);
3796
3797 /* Saw a breakpoint, but it was hit by the wrong thread.
3798 Just continue. */
3799
3800 if (singlestep_breakpoints_inserted_p)
3801 {
3802 /* Pull the single step breakpoints out of the target. */
3803 remove_single_step_breakpoints ();
3804 singlestep_breakpoints_inserted_p = 0;
3805 }
3806
3807 /* If the arch can displace step, don't remove the
3808 breakpoints. */
3809 thread_regcache = get_thread_regcache (ecs->ptid);
3810 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3811 remove_status = remove_breakpoints ();
3812
3813 /* Did we fail to remove breakpoints? If so, we cannot step
3814 this thread over the breakpoint, so report an error. (There's
3815 at least one situation in which we can fail to remove
3816 the bp's: On HP-UX's that use ttrace, we can't
3817 change the address space of a vforking child
3818 process until the child exits (well, okay, not
3819 then either :-) or execs.) */
3820 if (remove_status != 0)
3821 error (_("Cannot step over breakpoint hit in wrong thread"));
3822 else
3823 { /* Single step */
3824 if (!non_stop)
3825 {
3826 /* Only need to require the next event from this
3827 thread in all-stop mode. */
3828 waiton_ptid = ecs->ptid;
3829 infwait_state = infwait_thread_hop_state;
3830 }
3831
3832 ecs->event_thread->stepping_over_breakpoint = 1;
3833 keep_going (ecs);
3834 return;
3835 }
3836 }
3837 else if (singlestep_breakpoints_inserted_p)
3838 {
3839 sw_single_step_trap_p = 1;
3840 ecs->random_signal = 0;
3841 }
3842 }
3843 else
3844 ecs->random_signal = 1;
3845
3846 /* See if something interesting happened to the non-current thread. If
3847 so, then switch to that thread. */
3848 if (!ptid_equal (ecs->ptid, inferior_ptid))
3849 {
3850 if (debug_infrun)
3851 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3852
3853 context_switch (ecs->ptid);
3854
3855 if (deprecated_context_hook)
3856 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3857 }
3858
3859 /* At this point, get hold of the now-current thread's frame. */
3860 frame = get_current_frame ();
3861 gdbarch = get_frame_arch (frame);
3862
3863 if (singlestep_breakpoints_inserted_p)
3864 {
3865 /* Pull the single step breakpoints out of the target. */
3866 remove_single_step_breakpoints ();
3867 singlestep_breakpoints_inserted_p = 0;
3868 }
3869
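/* Decide whether this stop is a fresh watchpoint hit.  If we just
   single-stepped off a watchpoint trap (see the infwait_*_watch_state
   handling above), it is not; otherwise ask the target which
   watchpoints, if any, triggered.  */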
3870 if (stepped_after_stopped_by_watchpoint)
3871 stopped_by_watchpoint = 0;
3872 else
3873 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3874
3875 /* If necessary, step over this watchpoint. We'll be back to display
3876 it in a moment. */
3877 if (stopped_by_watchpoint
3878 && (target_have_steppable_watchpoint
3879 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3880 {
3881 /* At this point, we are stopped at an instruction which has
3882 attempted to write to a piece of memory under control of
3883 a watchpoint. The instruction hasn't actually executed
3884 yet. If we were to evaluate the watchpoint expression
3885 now, we would get the old value, and therefore no change
3886 would seem to have occurred.
3887
3888 In order to make watchpoints work `right', we really need
3889 to complete the memory write, and then evaluate the
3890 watchpoint expression. We do this by single-stepping the
3891 target.
3892
3893 It may not be necessary to disable the watchpoint to step over
3894 it. For example, the PA can (with some kernel cooperation)
3895 single step over a watchpoint without disabling the watchpoint.
3896
3897 It is far more common to need to disable a watchpoint to step
3898 the inferior over it. If we have non-steppable watchpoints,
3899 we must disable the current watchpoint; it's simplest to
3900 disable all watchpoints and breakpoints. */
3901 int hw_step = 1;
3902
3903 if (!target_have_steppable_watchpoint)
3904 {
3905 remove_breakpoints ();
3906 /* See comment in resume why we need to stop bypassing signals
3907 while breakpoints have been removed. */
3908 target_pass_signals (0, NULL);
3909 }
3910 /* Single step, planting software single-step breakpoints if the architecture requires them. */
3911 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3912 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3913 waiton_ptid = ecs->ptid;
3914 if (target_have_steppable_watchpoint)
3915 infwait_state = infwait_step_watch_state;
3916 else
3917 infwait_state = infwait_nonstep_watch_state;
3918 prepare_to_wait (ecs);
3919 return;
3920 }
3921
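/* Refresh the per-stop state: find out which function STOP_PC falls
   in, and reset the flags that the stepping logic below recomputes.  */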
3922 ecs->stop_func_start = 0;
3923 ecs->stop_func_end = 0;
3924 ecs->stop_func_name = 0;
3925 /* Don't care about return value; stop_func_start and stop_func_name
3926 will both be 0 if it doesn't work. */
3927 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3928 &ecs->stop_func_start, &ecs->stop_func_end);
3929 ecs->stop_func_start
3930 += gdbarch_deprecated_function_start_offset (gdbarch);
3931 ecs->event_thread->stepping_over_breakpoint = 0;
3932 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
3933 ecs->event_thread->control.stop_step = 0;
3934 stop_print_frame = 1;
3935 ecs->random_signal = 0;
3936 stopped_by_random_signal = 0;
3937
3938 /* Hide inlined functions starting here, unless we just performed stepi or
3939 nexti. After stepi and nexti, always show the innermost frame (not any
3940 inline function call sites). */
3941 if (ecs->event_thread->control.step_range_end != 1)
3942 skip_inline_frames (ecs->ptid);
3943
3944 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
3945 && ecs->event_thread->control.trap_expected
3946 && gdbarch_single_step_through_delay_p (gdbarch)
3947 && currently_stepping (ecs->event_thread))
3948 {
3949 /* We're trying to step off a breakpoint. Turns out that we're
3950 also on an instruction that needs to be stepped multiple
3951 times before it has been fully executed. E.g., architectures
3952 with a delay slot. It needs to be stepped twice, once for
3953 the instruction and once for the delay slot. */
3954 int step_through_delay
3955 = gdbarch_single_step_through_delay (gdbarch, frame);
3956
3957 if (debug_infrun && step_through_delay)
3958 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3959 if (ecs->event_thread->control.step_range_end == 0
3960 && step_through_delay)
3961 {
3962 /* The user issued a continue when stopped at a breakpoint.
3963 Set up for another trap and get out of here. */
3964 ecs->event_thread->stepping_over_breakpoint = 1;
3965 keep_going (ecs);
3966 return;
3967 }
3968 else if (step_through_delay)
3969 {
3970 /* The user issued a step when stopped at a breakpoint.
3971 Maybe we should stop, maybe we should not - the delay
3972 slot *might* correspond to a line of source. In any
3973 case, don't decide that here, just set
3974 ecs->stepping_over_breakpoint, making sure we
3975 single-step again before breakpoints are re-inserted. */
3976 ecs->event_thread->stepping_over_breakpoint = 1;
3977 }
3978 }
3979
3980 /* Look at the cause of the stop, and decide what to do.
3981 The alternatives are:
3982 1) stop_stepping and return; to really stop and return to the debugger,
3983 2) keep_going and return to start up again
3984 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3985 3) set ecs->random_signal to 1, and the decision between 1 and 2
3986 will be made according to the signal handling tables. */
3987
3988 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
3989 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3990 || stop_soon == STOP_QUIETLY_REMOTE)
3991 {
3992 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
3993 && stop_after_trap)
3994 {
3995 if (debug_infrun)
3996 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3997 stop_print_frame = 0;
3998 stop_stepping (ecs);
3999 return;
4000 }
4001
4002 /* This originates from start_remote(), start_inferior() and
4003 shared libraries hook functions. */
4004 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4005 {
4006 if (debug_infrun)
4007 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4008 stop_stepping (ecs);
4009 return;
4010 }
4011
4012 /* This originates from attach_command(). We need to overwrite
4013 the stop_signal here, because some kernels don't ignore a
4014 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4015 See more comments in inferior.h. On the other hand, if we
4016 get a non-SIGSTOP, report it to the user - assume the backend
4017 will handle the SIGSTOP if it should show up later.
4018
4019 Also consider that the attach is complete when we see a
4020 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4021 target extended-remote report it instead of a SIGSTOP
4022 (e.g. gdbserver). We already rely on SIGTRAP being our
4023 signal, so this is no exception.
4024
4025 Also consider that the attach is complete when we see a
4026 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4027 the target to stop all threads of the inferior, in case the
4028 low level attach operation doesn't stop them implicitly. If
4029 they weren't stopped implicitly, then the stub will report a
4030 TARGET_SIGNAL_0, meaning: stopped for no particular reason
4031 other than GDB's request. */
4032 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4033 && (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_STOP
4034 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4035 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_0))
4036 {
4037 stop_stepping (ecs);
4038 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4039 return;
4040 }
4041
4042 /* See if there is a breakpoint at the current PC. */
4043 ecs->event_thread->control.stop_bpstat
4044 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4045 stop_pc, ecs->ptid);
4046
4047 /* The following is in case the break condition called a
4048 function. */
4049 stop_print_frame = 1;
4050
4051 /* This is where we handle "moribund" watchpoints. Unlike
4052 software breakpoints traps, hardware watchpoint traps are
4053 always distinguishable from random traps. If no high-level
4054 watchpoint is associated with the reported stop data address
4055 anymore, then the bpstat does not explain the signal ---
4056 simply make sure to ignore it if `stopped_by_watchpoint' is
4057 set. */
4058
4059 if (debug_infrun
4060 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4061 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4062 && stopped_by_watchpoint)
4063 fprintf_unfiltered (gdb_stdlog,
4064 "infrun: no user watchpoint explains "
4065 "watchpoint SIGTRAP, ignoring\n");
4066
4067 /* NOTE: cagney/2003-03-29: These two checks for a random signal
4068 at one stage in the past included checks for an inferior
4069 function call's call dummy's return breakpoint. The original
4070 comment that went with the test read:
4071
4072 ``End of a stack dummy. Some systems (e.g. Sony news) give
4073 another signal besides SIGTRAP, so check here as well as
4074 above.''
4075
4076 If someone ever tries to get call dummies on a
4077 non-executable stack to work (where the target would stop
4078 with something like a SIGSEGV), then those tests might need
4079 to be re-instated. Given, however, that the tests were only
4080 enabled when momentary breakpoints were not being used, I
4081 suspect that it won't be the case.
4082
4083 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4084 be necessary for call dummies on a non-executable stack on
4085 SPARC. */
4086
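/* A SIGTRAP counts as a random signal only if nothing we know of
   explains it: no breakpoint in the bpstat, no triggered watchpoint,
   no expected single-step trap, and no active stepping range that is
   missing its step-resume breakpoint.  For any other signal, the
   bpstat alone decides.  */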
4087 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
4088 ecs->random_signal
4089 = !(bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4090 || stopped_by_watchpoint
4091 || ecs->event_thread->control.trap_expected
4092 || (ecs->event_thread->control.step_range_end
4093 && (ecs->event_thread->control.step_resume_breakpoint
4094 == NULL)));
4095 else
4096 {
4097 ecs->random_signal = !bpstat_explains_signal
4098 (ecs->event_thread->control.stop_bpstat);
4099 if (!ecs->random_signal)
4100 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
4101 }
4102 }
4103
4104 /* When we reach this point, we've pretty much decided
4105 that the reason for stopping must've been a random
4106 (unexpected) signal. */
4107
4108 else
4109 ecs->random_signal = 1;
4110
4111 process_event_stop_test:
4112
4113 /* Re-fetch current thread's frame in case we did a
4114 "goto process_event_stop_test" above. */
4115 frame = get_current_frame ();
4116 gdbarch = get_frame_arch (frame);
4117
4118 /* For the program's own signals, act according to
4119 the signal handling tables. */
4120
4121 if (ecs->random_signal)
4122 {
4123 /* Signal not for debugging purposes. */
4124 int printed = 0;
4125 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4126
4127 if (debug_infrun)
4128 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
4129 ecs->event_thread->suspend.stop_signal);
4130
4131 stopped_by_random_signal = 1;
4132
4133 if (signal_print[ecs->event_thread->suspend.stop_signal])
4134 {
4135 printed = 1;
4136 target_terminal_ours_for_output ();
4137 print_signal_received_reason
4138 (ecs->event_thread->suspend.stop_signal);
4139 }
4140 /* Always stop on signals if we're either just gaining control
4141 of the program, or the user explicitly requested this thread
4142 to remain stopped. */
4143 if (stop_soon != NO_STOP_QUIETLY
4144 || ecs->event_thread->stop_requested
4145 || (!inf->detaching
4146 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4147 {
4148 stop_stepping (ecs);
4149 return;
4150 }
4151 /* If not going to stop, give terminal back
4152 if we took it away. */
4153 else if (printed)
4154 target_terminal_inferior ();
4155
4156 /* Clear the signal if it should not be passed. */
4157 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4158 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4159
4160 if (ecs->event_thread->prev_pc == stop_pc
4161 && ecs->event_thread->control.trap_expected
4162 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4163 {
4164 /* We were just starting a new sequence, attempting to
4165 single-step off of a breakpoint and expecting a SIGTRAP.
4166 Instead this signal arrives. This signal will take us out
4167 of the stepping range so GDB needs to remember to, when
4168 the signal handler returns, resume stepping off that
4169 breakpoint. */
4170 /* To simplify things, "continue" is forced to use the same
4171 code paths as single-step - set a breakpoint at the
4172 signal return address and then, once hit, step off that
4173 breakpoint. */
4174 if (debug_infrun)
4175 fprintf_unfiltered (gdb_stdlog,
4176 "infrun: signal arrived while stepping over "
4177 "breakpoint\n");
4178
4179 insert_hp_step_resume_breakpoint_at_frame (frame);
4180 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4181 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4182 ecs->event_thread->control.trap_expected = 0;
4183 keep_going (ecs);
4184 return;
4185 }
4186
4187 if (ecs->event_thread->control.step_range_end != 0
4188 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_0
4189 && (ecs->event_thread->control.step_range_start <= stop_pc
4190 && stop_pc < ecs->event_thread->control.step_range_end)
4191 && frame_id_eq (get_stack_frame_id (frame),
4192 ecs->event_thread->control.step_stack_frame_id)
4193 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4194 {
4195 /* The inferior is about to take a signal that will take it
4196 out of the single step range. Set a breakpoint at the
4197 current PC (which is presumably where the signal handler
4198 will eventually return) and then allow the inferior to
4199 run free.
4200
4201 Note that this is only needed for a signal delivered
4202 while in the single-step range. Nested signals aren't a
4203 problem as they eventually all return. */
4204 if (debug_infrun)
4205 fprintf_unfiltered (gdb_stdlog,
4206 "infrun: signal may take us out of "
4207 "single-step range\n");
4208
4209 insert_hp_step_resume_breakpoint_at_frame (frame);
4210 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4211 ecs->event_thread->control.trap_expected = 0;
4212 keep_going (ecs);
4213 return;
4214 }
4215
4216 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4217 when either there's a nested signal, or when there's a
4218 pending signal enabled just as the signal handler returns
4219 (leaving the inferior at the step-resume-breakpoint without
4220 actually executing it). Either way continue until the
4221 breakpoint is really hit. */
4222 keep_going (ecs);
4223 return;
4224 }
4225
4226 /* Handle cases caused by hitting a breakpoint. */
4227 {
4228 CORE_ADDR jmp_buf_pc;
4229 struct bpstat_what what;
4230
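/* Ask the breakpoint machinery what action, if any, the bpstat
   calls for in response to this stop.  */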
4231 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4232
4233 if (what.call_dummy)
4234 {
4235 stop_stack_dummy = what.call_dummy;
4236 }
4237
4238 /* If we hit an internal event that triggers symbol changes, the
4239 current frame will be invalidated within bpstat_what (e.g., if
4240 we hit an internal solib event). Re-fetch it. */
4241 frame = get_current_frame ();
4242 gdbarch = get_frame_arch (frame);
4243
4244 switch (what.main_action)
4245 {
4246 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4247 /* If we hit the breakpoint at longjmp while stepping, we
4248 install a momentary breakpoint at the target of the
4249 jmp_buf. */
4250
4251 if (debug_infrun)
4252 fprintf_unfiltered (gdb_stdlog,
4253 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4254
4255 ecs->event_thread->stepping_over_breakpoint = 1;
4256
4257 if (what.is_longjmp)
4258 {
4259 if (!gdbarch_get_longjmp_target_p (gdbarch)
4260 || !gdbarch_get_longjmp_target (gdbarch,
4261 frame, &jmp_buf_pc))
4262 {
4263 if (debug_infrun)
4264 fprintf_unfiltered (gdb_stdlog,
4265 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4266 "(!gdbarch_get_longjmp_target)\n");
4267 keep_going (ecs);
4268 return;
4269 }
4270
4271 /* We're going to replace the current step-resume breakpoint
4272 with a longjmp-resume breakpoint. */
4273 delete_step_resume_breakpoint (ecs->event_thread);
4274
4275 /* Insert a breakpoint at resume address. */
4276 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4277 }
4278 else
4279 {
4280 struct symbol *func = get_frame_function (frame);
4281
4282 if (func)
4283 check_exception_resume (ecs, frame, func);
4284 }
4285 keep_going (ecs);
4286 return;
4287
4288 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4289 if (debug_infrun)
4290 fprintf_unfiltered (gdb_stdlog,
4291 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4292
4293 if (what.is_longjmp)
4294 {
4295 gdb_assert (ecs->event_thread->control.step_resume_breakpoint
4296 != NULL);
4297 delete_step_resume_breakpoint (ecs->event_thread);
4298 }
4299 else
4300 {
4301 /* There are several cases to consider.
4302
4303 1. The initiating frame no longer exists. In this case
4304 we must stop, because the exception has gone too far.
4305
4306 2. The initiating frame exists, and is the same as the
4307 current frame. We stop, because the exception has been
4308 caught.
4309
4310 3. The initiating frame exists and is different from
4311 the current frame. This means the exception has been
4312 caught beneath the initiating frame, so keep going. */
4313 struct frame_info *init_frame
4314 = frame_find_by_id (ecs->event_thread->initiating_frame);
4315
4316 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4317 != NULL);
4318 delete_exception_resume_breakpoint (ecs->event_thread);
4319
4320 if (init_frame)
4321 {
4322 struct frame_id current_id
4323 = get_frame_id (get_current_frame ());
4324 if (frame_id_eq (current_id,
4325 ecs->event_thread->initiating_frame))
4326 {
4327 /* Case 2. Fall through. */
4328 }
4329 else
4330 {
4331 /* Case 3. */
4332 keep_going (ecs);
4333 return;
4334 }
4335 }
4336
4337 /* For Cases 1 and 2, remove the step-resume breakpoint,
4338 if it exists. */
4339 delete_step_resume_breakpoint (ecs->event_thread);
4340 }
4341
4342 ecs->event_thread->control.stop_step = 1;
4343 print_end_stepping_range_reason ();
4344 stop_stepping (ecs);
4345 return;
4346
4347 case BPSTAT_WHAT_SINGLE:
4348 if (debug_infrun)
4349 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4350 ecs->event_thread->stepping_over_breakpoint = 1;
4351 /* Still need to check other stuff, at least the case
4352 where we are stepping and step out of the stepping range. */
4353 break;
4354
4355 case BPSTAT_WHAT_STEP_RESUME:
4356 if (debug_infrun)
4357 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4358
4359 delete_step_resume_breakpoint (ecs->event_thread);
4360 if (ecs->event_thread->control.proceed_to_finish
4361 && execution_direction == EXEC_REVERSE)
4362 {
4363 struct thread_info *tp = ecs->event_thread;
4364
4365 /* We are finishing a function in reverse, and just hit
4366 the step-resume breakpoint at the start address of the
4367 function, and we're almost there -- just need to back
4368 up by one more single-step, which should take us back
4369 to the function call. */
4370 tp->control.step_range_start = tp->control.step_range_end = 1;
4371 keep_going (ecs);
4372 return;
4373 }
4374 if (stop_pc == ecs->stop_func_start
4375 && execution_direction == EXEC_REVERSE)
4376 {
4377 /* We are stepping over a function call in reverse, and
4378 just hit the step-resume breakpoint at the start
4379 address of the function. Go back to single-stepping,
4380 which should take us back to the function call. */
4381 ecs->event_thread->stepping_over_breakpoint = 1;
4382 keep_going (ecs);
4383 return;
4384 }
4385 break;
4386
4387 case BPSTAT_WHAT_STOP_NOISY:
4388 if (debug_infrun)
4389 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4390 stop_print_frame = 1;
4391
4392 /* We are about to nuke the step_resume_breakpoint via the
4393 cleanup chain, so no need to worry about it here. */
4394
4395 stop_stepping (ecs);
4396 return;
4397
4398 case BPSTAT_WHAT_STOP_SILENT:
4399 if (debug_infrun)
4400 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4401 stop_print_frame = 0;
4402
4403 /* We are about to nuke the step_resume_breakpoint via the
4404 cleanup chain, so no need to worry about it here. */
4405
4406 stop_stepping (ecs);
4407 return;
4408
4409 case BPSTAT_WHAT_HP_STEP_RESUME:
4410 if (debug_infrun)
4411 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4412
4413 delete_step_resume_breakpoint (ecs->event_thread);
4414 if (ecs->event_thread->step_after_step_resume_breakpoint)
4415 {
4416 /* Back when the step-resume breakpoint was inserted, we
4417 were trying to single-step off a breakpoint. Go back
4418 to doing that. */
4419 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4420 ecs->event_thread->stepping_over_breakpoint = 1;
4421 keep_going (ecs);
4422 return;
4423 }
4424 break;
4425
4426 case BPSTAT_WHAT_KEEP_CHECKING:
4427 break;
4428 }
4429 }
4430
4431 /* We come here if we hit a breakpoint but should not
4432 stop for it. Possibly we also were stepping
4433 and should stop for that. So fall through and
4434 test for stepping. But, if not stepping,
4435 do not stop. */
4436
4437 /* In all-stop mode, if we're currently stepping but have stopped in
4438 some other thread, we need to switch back to the stepped thread. */
4439 if (!non_stop)
4440 {
4441 struct thread_info *tp;
4442
4443 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4444 ecs->event_thread);
4445 if (tp)
4446 {
4447 /* However, if the current thread is blocked on some internal
4448 breakpoint, and we simply need to step over that breakpoint
4449 to get it going again, do that first. */
4450 if ((ecs->event_thread->control.trap_expected
4451 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
4452 || ecs->event_thread->stepping_over_breakpoint)
4453 {
4454 keep_going (ecs);
4455 return;
4456 }
4457
4458 /* If the stepping thread exited, then don't try to switch
4459 back and resume it, which could fail in several different
4460 ways depending on the target. Instead, just keep going.
4461
4462 We can find a stepping dead thread in the thread list in
4463 two cases:
4464
4465 - The target supports thread exit events, and when the
4466 target tries to delete the thread from the thread list,
4467 inferior_ptid pointed at the exiting thread. In such
4468 case, calling delete_thread does not really remove the
4469 thread from the list; instead, the thread is left listed,
4470 with 'exited' state.
4471
4472 - The target's debug interface does not support thread
4473 exit events, and so we have no idea whatsoever if the
4474 previously stepping thread is still alive. For that
4475 reason, we need to synchronously query the target
4476 now. */
4477 if (is_exited (tp->ptid)
4478 || !target_thread_alive (tp->ptid))
4479 {
4480 if (debug_infrun)
4481 fprintf_unfiltered (gdb_stdlog,
4482 "infrun: not switching back to "
4483 "stepped thread, it has vanished\n");
4484
4485 delete_thread (tp->ptid);
4486 keep_going (ecs);
4487 return;
4488 }
4489
4490 /* Otherwise, we no longer expect a trap in the current thread.
4491 Clear the trap_expected flag before switching back -- this is
4492 what keep_going would do as well, if we called it. */
4493 ecs->event_thread->control.trap_expected = 0;
4494
4495 if (debug_infrun)
4496 fprintf_unfiltered (gdb_stdlog,
4497 "infrun: switching back to stepped thread\n");
4498
4499 ecs->event_thread = tp;
4500 ecs->ptid = tp->ptid;
4501 context_switch (ecs->ptid);
4502 keep_going (ecs);
4503 return;
4504 }
4505 }
4506
4507 /* Are we stepping to get the inferior out of the dynamic linker's
4508 hook (and possibly the dld itself) after catching a shlib
4509 event? */
4510 if (ecs->event_thread->stepping_through_solib_after_catch)
4511 {
4512 #if defined(SOLIB_ADD)
4513 /* Have we reached our destination? If not, keep going. */
4514 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4515 {
4516 if (debug_infrun)
4517 fprintf_unfiltered (gdb_stdlog,
4518 "infrun: stepping in dynamic linker\n");
4519 ecs->event_thread->stepping_over_breakpoint = 1;
4520 keep_going (ecs);
4521 return;
4522 }
4523 #endif
4524 if (debug_infrun)
4525 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4526 /* Else, stop and report the catchpoint(s) whose triggering
4527 caused us to begin stepping. */
4528 ecs->event_thread->stepping_through_solib_after_catch = 0;
4529 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4530 ecs->event_thread->control.stop_bpstat
4531 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4532 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4533 stop_print_frame = 1;
4534 stop_stepping (ecs);
4535 return;
4536 }
4537
4538 if (ecs->event_thread->control.step_resume_breakpoint)
4539 {
4540 if (debug_infrun)
4541 fprintf_unfiltered (gdb_stdlog,
4542 "infrun: step-resume breakpoint is inserted\n");
4543
4544 /* Having a step-resume breakpoint overrides anything
4545 else having to do with stepping commands until
4546 that breakpoint is reached. */
4547 keep_going (ecs);
4548 return;
4549 }
4550
4551 if (ecs->event_thread->control.step_range_end == 0)
4552 {
4553 if (debug_infrun)
4554 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4555 /* Likewise if we aren't even stepping. */
4556 keep_going (ecs);
4557 return;
4558 }
4559
4560 /* Re-fetch current thread's frame in case the code above caused
4561 the frame cache to be re-initialized, making our FRAME variable
4562 a dangling pointer. */
4563 frame = get_current_frame ();
4564 gdbarch = get_frame_arch (frame);
4565
4566 /* If stepping through a line, keep going if still within it.
4567
4568 Note that step_range_end is the address of the first instruction
4569 beyond the step range, and NOT the address of the last instruction
4570 within it!
4571
4572 Note also that during reverse execution, we may be stepping
4573 through a function epilogue and therefore must detect when
4574 the current frame changes in the middle of a line. */
4575
4576 if (stop_pc >= ecs->event_thread->control.step_range_start
4577 && stop_pc < ecs->event_thread->control.step_range_end
4578 && (execution_direction != EXEC_REVERSE
4579 || frame_id_eq (get_frame_id (frame),
4580 ecs->event_thread->control.step_frame_id)))
4581 {
4582 if (debug_infrun)
4583 fprintf_unfiltered
4584 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4585 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4586 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4587
4588 /* When stepping backward, stop at beginning of line range
4589 (unless it's the function entry point, in which case
4590 keep going back to the call point). */
4591 if (stop_pc == ecs->event_thread->control.step_range_start
4592 && stop_pc != ecs->stop_func_start
4593 && execution_direction == EXEC_REVERSE)
4594 {
4595 ecs->event_thread->control.stop_step = 1;
4596 print_end_stepping_range_reason ();
4597 stop_stepping (ecs);
4598 }
4599 else
4600 keep_going (ecs);
4601
4602 return;
4603 }
4604
4605 /* We stepped out of the stepping range. */
4606
4607 /* If we are stepping at the source level and entered the runtime
4608 loader dynamic symbol resolution code...
4609
4610 EXEC_FORWARD: we keep on single stepping until we exit the run
4611 time loader code and reach the callee's address.
4612
4613 EXEC_REVERSE: we've already executed the callee (backward), and
4614 the runtime loader code is handled just like any other
4615 undebuggable function call. Now we need only keep stepping
4616 backward through the trampoline code, and that's handled further
4617 down, so there is nothing for us to do here. */
4618
4619 if (execution_direction != EXEC_REVERSE
4620 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4621 && in_solib_dynsym_resolve_code (stop_pc))
4622 {
4623 CORE_ADDR pc_after_resolver =
4624 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4625
4626 if (debug_infrun)
4627 fprintf_unfiltered (gdb_stdlog,
4628 "infrun: stepped into dynsym resolve code\n");
4629
4630 if (pc_after_resolver)
4631 {
4632 /* Set up a step-resume breakpoint at the address
4633 indicated by SKIP_SOLIB_RESOLVER. */
4634 struct symtab_and_line sr_sal;
4635
4636 init_sal (&sr_sal);
4637 sr_sal.pc = pc_after_resolver;
4638 sr_sal.pspace = get_frame_program_space (frame);
4639
4640 insert_step_resume_breakpoint_at_sal (gdbarch,
4641 sr_sal, null_frame_id);
4642 }
4643
4644 keep_going (ecs);
4645 return;
4646 }
4647
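/* A step_range_end of 1 means this was a "stepi"/"nexti"; for those
   we do not single-step on through signal trampolines.  */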
4648 if (ecs->event_thread->control.step_range_end != 1
4649 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4650 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4651 && get_frame_type (frame) == SIGTRAMP_FRAME)
4652 {
4653 if (debug_infrun)
4654 fprintf_unfiltered (gdb_stdlog,
4655 "infrun: stepped into signal trampoline\n");
4656 /* The inferior, while doing a "step" or "next", has ended up in
4657 a signal trampoline (either by a signal being delivered or by
4658 the signal handler returning). Just single-step until the
4659 inferior leaves the trampoline (either by calling the handler
4660 or returning). */
4661 keep_going (ecs);
4662 return;
4663 }
4664
4665 /* Check for subroutine calls. The check for the current frame
4666 equalling the step ID is not necessary - the check of the
4667 previous frame's ID is sufficient - but it is a common case and
4668 cheaper than checking the previous frame's ID.
4669
4670 NOTE: frame_id_eq will never report two invalid frame IDs as
4671 being equal, so to get into this block, both the current and
4672 previous frame must have valid frame IDs. */
4673 /* The outer_frame_id check is a heuristic to detect stepping
4674 through startup code. If we step over an instruction which
4675 sets the stack pointer from an invalid value to a valid value,
4676 we may detect that as a subroutine call from the mythical
4677 "outermost" function. This could be fixed by marking
4678 outermost frames as !stack_p,code_p,special_p. Then the
4679 initial outermost frame, before sp was valid, would
4680 have code_addr == &_start. See the comment in frame_id_eq
4681 for more. */
4682 if (!frame_id_eq (get_stack_frame_id (frame),
4683 ecs->event_thread->control.step_stack_frame_id)
4684 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4685 ecs->event_thread->control.step_stack_frame_id)
4686 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4687 outer_frame_id)
4688 || step_start_function != find_pc_function (stop_pc))))
4689 {
4690 CORE_ADDR real_stop_pc;
4691
4692 if (debug_infrun)
4693 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4694
4695 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4696 || ((ecs->event_thread->control.step_range_end == 1)
4697 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4698 ecs->stop_func_start)))
4699 {
4700 /* I presume that step_over_calls is only 0 when we're
4701 supposed to be stepping at the assembly language level
4702 ("stepi"). Just stop. */
4703 /* Also, maybe we just did a "nexti" inside a prologue, so we
4704 thought it was a subroutine call but it was not. Stop as
4705 well. FENN */
4706 /* And this works the same backward as frontward. MVS */
4707 ecs->event_thread->control.stop_step = 1;
4708 print_end_stepping_range_reason ();
4709 stop_stepping (ecs);
4710 return;
4711 }
4712
4713 /* Reverse stepping through solib trampolines. */
4714
4715 if (execution_direction == EXEC_REVERSE
4716 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4717 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4718 || (ecs->stop_func_start == 0
4719 && in_solib_dynsym_resolve_code (stop_pc))))
4720 {
4721 /* Any solib trampoline code can be handled in reverse
4722 by simply continuing to single-step. We have already
4723 executed the solib function (backwards), and a few
4724 steps will take us back through the trampoline to the
4725 caller. */
4726 keep_going (ecs);
4727 return;
4728 }
4729
4730 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4731 {
4732 /* We're doing a "next".
4733
4734 Normal (forward) execution: set a breakpoint at the
4735 callee's return address (the address at which the caller
4736 will resume).
4737
4738 Reverse (backward) execution: set the step-resume
4739 breakpoint at the start of the function that we just
4740 stepped into (backwards), and continue to there. When we
4741 get there, we'll need to single-step back to the caller. */
4742
4743 if (execution_direction == EXEC_REVERSE)
4744 {
4745 struct symtab_and_line sr_sal;
4746
4747 /* Normal function call return (static or dynamic). */
4748 init_sal (&sr_sal);
4749 sr_sal.pc = ecs->stop_func_start;
4750 sr_sal.pspace = get_frame_program_space (frame);
4751 insert_step_resume_breakpoint_at_sal (gdbarch,
4752 sr_sal, null_frame_id);
4753 }
4754 else
4755 insert_step_resume_breakpoint_at_caller (frame);
4756
4757 keep_going (ecs);
4758 return;
4759 }
4760
4761 /* If we are in a function call trampoline (a stub between the
4762 calling routine and the real function), locate the real
4763 function. That's what tells us (a) whether we want to step
4764 into it at all, and (b) what prologue we want to run to the
4765 end of, if we do step into it. */
4766 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4767 if (real_stop_pc == 0)
4768 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4769 if (real_stop_pc != 0)
4770 ecs->stop_func_start = real_stop_pc;
4771
4772 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4773 {
4774 struct symtab_and_line sr_sal;
4775
4776 init_sal (&sr_sal);
4777 sr_sal.pc = ecs->stop_func_start;
4778 sr_sal.pspace = get_frame_program_space (frame);
4779
4780 insert_step_resume_breakpoint_at_sal (gdbarch,
4781 sr_sal, null_frame_id);
4782 keep_going (ecs);
4783 return;
4784 }
4785
4786 /* If we have line number information for the function we are
4787 thinking of stepping into, step into it.
4788
4789 If there are several symtabs at that PC (e.g. with include
4790 files), just want to know whether *any* of them have line
4791 numbers. find_pc_line handles this. */
4792 {
4793 struct symtab_and_line tmp_sal;
4794
4795 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4796 if (tmp_sal.line != 0)
4797 {
4798 if (execution_direction == EXEC_REVERSE)
4799 handle_step_into_function_backward (gdbarch, ecs);
4800 else
4801 handle_step_into_function (gdbarch, ecs);
4802 return;
4803 }
4804 }
4805
4806 /* If we have no line number and the step-stop-if-no-debug is
4807 set, we stop the step so that the user has a chance to switch
4808 to assembly mode. */
4809 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4810 && step_stop_if_no_debug)
4811 {
4812 ecs->event_thread->control.stop_step = 1;
4813 print_end_stepping_range_reason ();
4814 stop_stepping (ecs);
4815 return;
4816 }
4817
4818 if (execution_direction == EXEC_REVERSE)
4819 {
4820 /* Set a breakpoint at callee's start address.
4821 From there we can step once and be back in the caller. */
4822 struct symtab_and_line sr_sal;
4823
4824 init_sal (&sr_sal);
4825 sr_sal.pc = ecs->stop_func_start;
4826 sr_sal.pspace = get_frame_program_space (frame);
4827 insert_step_resume_breakpoint_at_sal (gdbarch,
4828 sr_sal, null_frame_id);
4829 }
4830 else
4831 /* Set a breakpoint at callee's return address (the address
4832 at which the caller will resume). */
4833 insert_step_resume_breakpoint_at_caller (frame);
4834
4835 keep_going (ecs);
4836 return;
4837 }
4838
4839 /* Reverse stepping through solib trampolines. */
4840
4841 if (execution_direction == EXEC_REVERSE
4842 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4843 {
4844 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4845 || (ecs->stop_func_start == 0
4846 && in_solib_dynsym_resolve_code (stop_pc)))
4847 {
4848 /* Any solib trampoline code can be handled in reverse
4849 by simply continuing to single-step. We have already
4850 executed the solib function (backwards), and a few
4851 steps will take us back through the trampoline to the
4852 caller. */
4853 keep_going (ecs);
4854 return;
4855 }
4856 else if (in_solib_dynsym_resolve_code (stop_pc))
4857 {
4858 /* Stepped backward into the solib dynsym resolver.
4859 Set a breakpoint at its start and continue, then
4860 one more step will take us out. */
4861 struct symtab_and_line sr_sal;
4862
4863 init_sal (&sr_sal);
4864 sr_sal.pc = ecs->stop_func_start;
4865 sr_sal.pspace = get_frame_program_space (frame);
4866 insert_step_resume_breakpoint_at_sal (gdbarch,
4867 sr_sal, null_frame_id);
4868 keep_going (ecs);
4869 return;
4870 }
4871 }
4872
4873 /* If we're in the return path from a shared library trampoline,
4874 we want to proceed through the trampoline when stepping. */
4875 if (gdbarch_in_solib_return_trampoline (gdbarch,
4876 stop_pc, ecs->stop_func_name))
4877 {
4878 /* Determine where this trampoline returns. */
4879 CORE_ADDR real_stop_pc;
4880
4881 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4882
4883 if (debug_infrun)
4884 fprintf_unfiltered (gdb_stdlog,
4885 "infrun: stepped into solib return tramp\n");
4886
4887 /* Only proceed through if we know where it's going. */
4888 if (real_stop_pc)
4889 {
4890 /* And put the step-breakpoint there and go until there. */
4891 struct symtab_and_line sr_sal;
4892
4893 init_sal (&sr_sal); /* initialize to zeroes */
4894 sr_sal.pc = real_stop_pc;
4895 sr_sal.section = find_pc_overlay (sr_sal.pc);
4896 sr_sal.pspace = get_frame_program_space (frame);
4897
4898 /* Do not specify what the fp should be when we stop since
4899 on some machines the prologue is where the new fp value
4900 is established. */
4901 insert_step_resume_breakpoint_at_sal (gdbarch,
4902 sr_sal, null_frame_id);
4903
4904 /* Restart without fiddling with the step ranges or
4905 other state. */
4906 keep_going (ecs);
4907 return;
4908 }
4909 }
4910
4911 stop_pc_sal = find_pc_line (stop_pc, 0);
4912
4913 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4914 the trampoline processing logic; however, there are some trampolines
4915 that have no names, so we should do trampoline handling first. */
4916 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4917 && ecs->stop_func_name == NULL
4918 && stop_pc_sal.line == 0)
4919 {
4920 if (debug_infrun)
4921 fprintf_unfiltered (gdb_stdlog,
4922 "infrun: stepped into undebuggable function\n");
4923
4924 /* The inferior just stepped into, or returned to, an
4925 undebuggable function (where there is no debugging information
4926 and no line number corresponding to the address where the
4927 inferior stopped). Since we want to skip this kind of code,
4928 we keep going until the inferior returns from this
4929 function - unless the user has asked us not to (via
4930 set step-mode) or we no longer know how to get back
4931 to the call site. */
4932 if (step_stop_if_no_debug
4933 || !frame_id_p (frame_unwind_caller_id (frame)))
4934 {
4935 /* If we have no line number and the step-stop-if-no-debug
4936 is set, we stop the step so that the user has a chance to
4937 switch to assembly mode.  */
4938 ecs->event_thread->control.stop_step = 1;
4939 print_end_stepping_range_reason ();
4940 stop_stepping (ecs);
4941 return;
4942 }
4943 else
4944 {
4945 /* Set a breakpoint at callee's return address (the address
4946 at which the caller will resume). */
4947 insert_step_resume_breakpoint_at_caller (frame);
4948 keep_going (ecs);
4949 return;
4950 }
4951 }
4952
4953 if (ecs->event_thread->control.step_range_end == 1)
4954 {
4955 /* It is stepi or nexti. We always want to stop stepping after
4956 one instruction. */
4957 if (debug_infrun)
4958 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4959 ecs->event_thread->control.stop_step = 1;
4960 print_end_stepping_range_reason ();
4961 stop_stepping (ecs);
4962 return;
4963 }
4964
4965 if (stop_pc_sal.line == 0)
4966 {
4967 /* We have no line number information. That means to stop
4968 stepping (does this always happen right after one instruction,
4969 when we do "s" in a function with no line numbers,
4970 or can this happen as a result of a return or longjmp?). */
4971 if (debug_infrun)
4972 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4973 ecs->event_thread->control.stop_step = 1;
4974 print_end_stepping_range_reason ();
4975 stop_stepping (ecs);
4976 return;
4977 }
4978
4979 /* Look for "calls" to inlined functions, part one. If the inline
4980 frame machinery detected some skipped call sites, we have entered
4981 a new inline function. */
4982
4983 if (frame_id_eq (get_frame_id (get_current_frame ()),
4984 ecs->event_thread->control.step_frame_id)
4985 && inline_skipped_frames (ecs->ptid))
4986 {
4987 struct symtab_and_line call_sal;
4988
4989 if (debug_infrun)
4990 fprintf_unfiltered (gdb_stdlog,
4991 "infrun: stepped into inlined function\n");
4992
4993 find_frame_sal (get_current_frame (), &call_sal);
4994
4995 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
4996 {
4997 /* For "step", we're going to stop. But if the call site
4998 for this inlined function is on the same source line as
4999 we were previously stepping, go down into the function
5000 first. Otherwise stop at the call site. */
5001
5002 if (call_sal.line == ecs->event_thread->current_line
5003 && call_sal.symtab == ecs->event_thread->current_symtab)
5004 step_into_inline_frame (ecs->ptid);
5005
5006 ecs->event_thread->control.stop_step = 1;
5007 print_end_stepping_range_reason ();
5008 stop_stepping (ecs);
5009 return;
5010 }
5011 else
5012 {
5013 /* For "next", we should stop at the call site if it is on a
5014 different source line. Otherwise continue through the
5015 inlined function. */
5016 if (call_sal.line == ecs->event_thread->current_line
5017 && call_sal.symtab == ecs->event_thread->current_symtab)
5018 keep_going (ecs);
5019 else
5020 {
5021 ecs->event_thread->control.stop_step = 1;
5022 print_end_stepping_range_reason ();
5023 stop_stepping (ecs);
5024 }
5025 return;
5026 }
5027 }
5028
5029 /* Look for "calls" to inlined functions, part two. If we are still
5030 in the same real function we were stepping through, but we have
5031 to go further up to find the exact frame ID, we are stepping
5032 through a more inlined call beyond its call site. */
5033
5034 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5035 && !frame_id_eq (get_frame_id (get_current_frame ()),
5036 ecs->event_thread->control.step_frame_id)
5037 && stepped_in_from (get_current_frame (),
5038 ecs->event_thread->control.step_frame_id))
5039 {
5040 if (debug_infrun)
5041 fprintf_unfiltered (gdb_stdlog,
5042 "infrun: stepping through inlined function\n");
5043
5044 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5045 keep_going (ecs);
5046 else
5047 {
5048 ecs->event_thread->control.stop_step = 1;
5049 print_end_stepping_range_reason ();
5050 stop_stepping (ecs);
5051 }
5052 return;
5053 }
5054
5055 if ((stop_pc == stop_pc_sal.pc)
5056 && (ecs->event_thread->current_line != stop_pc_sal.line
5057 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5058 {
5059 /* We are at the start of a different line. So stop. Note that
5060 we don't stop if we step into the middle of a different line.
5061 That is said to make things like for (;;) statements work
5062 better. */
5063 if (debug_infrun)
5064 fprintf_unfiltered (gdb_stdlog,
5065 "infrun: stepped to a different line\n");
5066 ecs->event_thread->control.stop_step = 1;
5067 print_end_stepping_range_reason ();
5068 stop_stepping (ecs);
5069 return;
5070 }
5071
5072 /* We aren't done stepping.
5073
5074 Optimize by setting the stepping range to the line.
5075 (We might not be in the original line, but if we entered a
5076 new line in mid-statement, we continue stepping. This makes
5077 things like for(;;) statements work better.) */
5078
5079 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5080 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5081 set_step_info (frame, stop_pc_sal);
5082
5083 if (debug_infrun)
5084 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5085 keep_going (ecs);
5086 }
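/* A minimal sketch (plain C, hypothetical helper name) of how the step
   range recorded just above is consulted when the next stop event
   arrives: we keep stepping for as long as the PC stays inside the
   half-open range covering the current source line.

     static int
     pc_in_step_range (CORE_ADDR pc, CORE_ADDR start, CORE_ADDR end)
     {
       return start <= pc && pc < end;
     }

   The real check is written inline elsewhere in the event-handling
   code; the helper above only illustrates the invariant.  */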
5087
5088 /* Is thread TP in the middle of single-stepping? */
5089
5090 static int
5091 currently_stepping (struct thread_info *tp)
5092 {
5093 return ((tp->control.step_range_end
5094 && tp->control.step_resume_breakpoint == NULL)
5095 || tp->control.trap_expected
5096 || tp->stepping_through_solib_after_catch
5097 || bpstat_should_step ());
5098 }
5099
5100 /* Returns true if any thread *but* the one passed in "data" is in the
5101 middle of stepping or of handling a "next". */
5102
5103 static int
5104 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5105 {
5106 if (tp == data)
5107 return 0;
5108
5109 return (tp->control.step_range_end
5110 || tp->control.trap_expected
5111 || tp->stepping_through_solib_after_catch);
5112 }
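/* Illustrative usage sketch: a caller that wants to know whether any
   *other* thread is stepping typically hands this callback to
   iterate_over_threads, along the lines of

     if (iterate_over_threads (currently_stepping_or_nexting_callback,
                               ecs->event_thread) != NULL)
       ... some other thread is in the middle of a step/next ...

   The actual call site is elsewhere in this file; the snippet above only
   shows the intended pattern.  */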
5113
5114 /* Inferior has stepped into a subroutine call with source code that
5115 we should not step over.  Step to the first line of code in
5116 it. */
5117
5118 static void
5119 handle_step_into_function (struct gdbarch *gdbarch,
5120 struct execution_control_state *ecs)
5121 {
5122 struct symtab *s;
5123 struct symtab_and_line stop_func_sal, sr_sal;
5124
5125 s = find_pc_symtab (stop_pc);
5126 if (s && s->language != language_asm)
5127 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5128 ecs->stop_func_start);
5129
5130 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5131 /* Use the step_resume_break to step until the end of the prologue,
5132 even if that involves jumps (as it seems to on the vax under
5133 4.2). */
5134 /* If the prologue ends in the middle of a source line, continue to
5135 the end of that source line (if it is still within the function).
5136 Otherwise, just go to end of prologue. */
5137 if (stop_func_sal.end
5138 && stop_func_sal.pc != ecs->stop_func_start
5139 && stop_func_sal.end < ecs->stop_func_end)
5140 ecs->stop_func_start = stop_func_sal.end;
5141
5142 /* Architectures which require breakpoint adjustment might not be able
5143 to place a breakpoint at the computed address. If so, the test
5144 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5145 ecs->stop_func_start to an address at which a breakpoint may be
5146 legitimately placed.
5147
5148 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5149 made, GDB will enter an infinite loop when stepping through
5150 optimized code consisting of VLIW instructions which contain
5151 subinstructions corresponding to different source lines. On
5152 FR-V, it's not permitted to place a breakpoint on any but the
5153 first subinstruction of a VLIW instruction. When a breakpoint is
5154 set, GDB will adjust the breakpoint address to the beginning of
5155 the VLIW instruction. Thus, we need to make the corresponding
5156 adjustment here when computing the stop address. */
5157
5158 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5159 {
5160 ecs->stop_func_start
5161 = gdbarch_adjust_breakpoint_address (gdbarch,
5162 ecs->stop_func_start);
5163 }
5164
5165 if (ecs->stop_func_start == stop_pc)
5166 {
5167 /* We are already there: stop now. */
5168 ecs->event_thread->control.stop_step = 1;
5169 print_end_stepping_range_reason ();
5170 stop_stepping (ecs);
5171 return;
5172 }
5173 else
5174 {
5175 /* Put the step-breakpoint there and go until there. */
5176 init_sal (&sr_sal); /* initialize to zeroes */
5177 sr_sal.pc = ecs->stop_func_start;
5178 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5179 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5180
5181 /* Do not specify what the fp should be when we stop since on
5182 some machines the prologue is where the new fp value is
5183 established. */
5184 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5185
5186 /* And make sure stepping stops right away then. */
5187 ecs->event_thread->control.step_range_end
5188 = ecs->event_thread->control.step_range_start;
5189 }
5190 keep_going (ecs);
5191 }
5192
5193 /* Inferior has stepped backward into a subroutine call with source
5194 code that we should not step over.  Step to the beginning of the
5195 last line of code in it. */
5196
5197 static void
5198 handle_step_into_function_backward (struct gdbarch *gdbarch,
5199 struct execution_control_state *ecs)
5200 {
5201 struct symtab *s;
5202 struct symtab_and_line stop_func_sal;
5203
5204 s = find_pc_symtab (stop_pc);
5205 if (s && s->language != language_asm)
5206 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5207 ecs->stop_func_start);
5208
5209 stop_func_sal = find_pc_line (stop_pc, 0);
5210
5211 /* OK, we're just going to keep stepping here. */
5212 if (stop_func_sal.pc == stop_pc)
5213 {
5214 /* We're there already. Just stop stepping now. */
5215 ecs->event_thread->control.stop_step = 1;
5216 print_end_stepping_range_reason ();
5217 stop_stepping (ecs);
5218 }
5219 else
5220 {
5221 /* Else just reset the step range and keep going.
5222 No step-resume breakpoint, they don't work for
5223 epilogues, which can have multiple entry paths. */
5224 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5225 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5226 keep_going (ecs);
5227 }
5228 return;
5229 }
5230
5231 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5232 This is used both to skip over functions and to skip over other code, such as trampolines.  */
5233
5234 static void
5235 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5236 struct symtab_and_line sr_sal,
5237 struct frame_id sr_id,
5238 enum bptype sr_type)
5239 {
5240 /* There should never be more than one step-resume or longjmp-resume
5241 breakpoint per thread, so we should never be setting a new
5242 step_resume_breakpoint when one is already active. */
5243 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5244 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5245
5246 if (debug_infrun)
5247 fprintf_unfiltered (gdb_stdlog,
5248 "infrun: inserting step-resume breakpoint at %s\n",
5249 paddress (gdbarch, sr_sal.pc));
5250
5251 inferior_thread ()->control.step_resume_breakpoint
5252 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5253 }
5254
5255 void
5256 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5257 struct symtab_and_line sr_sal,
5258 struct frame_id sr_id)
5259 {
5260 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5261 sr_sal, sr_id,
5262 bp_step_resume);
5263 }
5264
5265 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5266 This is used to skip a potential signal handler.
5267
5268 This is called with the interrupted function's frame. The signal
5269 handler, when it returns, will resume the interrupted function at
5270 RETURN_FRAME.pc. */
5271
5272 static void
5273 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5274 {
5275 struct symtab_and_line sr_sal;
5276 struct gdbarch *gdbarch;
5277
5278 gdb_assert (return_frame != NULL);
5279 init_sal (&sr_sal); /* initialize to zeros */
5280
5281 gdbarch = get_frame_arch (return_frame);
5282 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5283 sr_sal.section = find_pc_overlay (sr_sal.pc);
5284 sr_sal.pspace = get_frame_program_space (return_frame);
5285
5286 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5287 get_stack_frame_id (return_frame),
5288 bp_hp_step_resume);
5289 }
5290
5291 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5292 is used to skip a function after stepping into it (for "next" or if
5293 the called function has no debugging information).
5294
5295 The current function has almost always been reached by single
5296 stepping a call or return instruction. NEXT_FRAME belongs to the
5297 current function, and the breakpoint will be set at the caller's
5298 resume address.
5299
5300 This is a separate function rather than reusing
5301 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5302 get_prev_frame, which may stop prematurely (see the implementation
5303 of frame_unwind_caller_id for an example). */
5304
5305 static void
5306 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5307 {
5308 struct symtab_and_line sr_sal;
5309 struct gdbarch *gdbarch;
5310
5311 /* We shouldn't have gotten here if we don't know where the call site
5312 is. */
5313 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5314
5315 init_sal (&sr_sal); /* initialize to zeros */
5316
5317 gdbarch = frame_unwind_caller_arch (next_frame);
5318 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5319 frame_unwind_caller_pc (next_frame));
5320 sr_sal.section = find_pc_overlay (sr_sal.pc);
5321 sr_sal.pspace = frame_unwind_program_space (next_frame);
5322
5323 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5324 frame_unwind_caller_id (next_frame));
5325 }
5326
5327 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5328 new breakpoint at the target of a jmp_buf. The handling of
5329 longjmp-resume uses the same mechanisms used for handling
5330 "step-resume" breakpoints. */
5331
5332 static void
5333 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5334 {
5335 /* There should never be more than one step-resume or longjmp-resume
5336 breakpoint per thread, so we should never be setting a new
5337 longjmp_resume_breakpoint when one is already active. */
5338 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5339
5340 if (debug_infrun)
5341 fprintf_unfiltered (gdb_stdlog,
5342 "infrun: inserting longjmp-resume breakpoint at %s\n",
5343 paddress (gdbarch, pc));
5344
5345 inferior_thread ()->control.step_resume_breakpoint =
5346 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5347 }
5348
5349 /* Insert an exception resume breakpoint. TP is the thread throwing
5350 the exception. The block B is the block of the unwinder debug hook
5351 function. FRAME is the frame corresponding to the call to this
5352 function. SYM is the symbol of the function argument holding the
5353 target PC of the exception. */
5354
5355 static void
5356 insert_exception_resume_breakpoint (struct thread_info *tp,
5357 struct block *b,
5358 struct frame_info *frame,
5359 struct symbol *sym)
5360 {
5361 struct gdb_exception e;
5362
5363 /* We want to ignore errors here. */
5364 TRY_CATCH (e, RETURN_MASK_ERROR)
5365 {
5366 struct symbol *vsym;
5367 struct value *value;
5368 CORE_ADDR handler;
5369 struct breakpoint *bp;
5370
5371 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5372 value = read_var_value (vsym, frame);
5373 /* If the value was optimized out, revert to the old behavior. */
5374 if (! value_optimized_out (value))
5375 {
5376 handler = value_as_address (value);
5377
5378 if (debug_infrun)
5379 fprintf_unfiltered (gdb_stdlog,
5380 "infrun: exception resume at %lx\n",
5381 (unsigned long) handler);
5382
5383 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5384 handler, bp_exception_resume);
5385 bp->thread = tp->num;
5386 inferior_thread ()->control.exception_resume_breakpoint = bp;
5387 }
5388 }
5389 }
5390
5391 /* This is called when an exception has been intercepted. Check to
5392 see whether the exception's destination is of interest, and if so,
5393 set an exception resume breakpoint there. */
5394
5395 static void
5396 check_exception_resume (struct execution_control_state *ecs,
5397 struct frame_info *frame, struct symbol *func)
5398 {
5399 struct gdb_exception e;
5400
5401 TRY_CATCH (e, RETURN_MASK_ERROR)
5402 {
5403 struct block *b;
5404 struct dict_iterator iter;
5405 struct symbol *sym;
5406 int argno = 0;
5407
5408 /* The exception breakpoint is a thread-specific breakpoint on
5409 the unwinder's debug hook, declared as:
5410
5411 void _Unwind_DebugHook (void *cfa, void *handler);
5412
5413 The CFA argument indicates the frame to which control is
5414 about to be transferred. HANDLER is the destination PC.
5415
5416 We ignore the CFA and set a temporary breakpoint at HANDLER.
5417 This is not extremely efficient but it avoids issues in gdb
5418 with computing the DWARF CFA, and it also works even in weird
5419 cases such as throwing an exception from inside a signal
5420 handler. */
5421
5422 b = SYMBOL_BLOCK_VALUE (func);
5423 ALL_BLOCK_SYMBOLS (b, iter, sym)
5424 {
5425 if (!SYMBOL_IS_ARGUMENT (sym))
5426 continue;
5427
5428 if (argno == 0)
5429 ++argno;
5430 else
5431 {
5432 insert_exception_resume_breakpoint (ecs->event_thread,
5433 b, frame, sym);
5434 break;
5435 }
5436 }
5437 }
5438 }
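/* For reference, the unwinder-side half of the contract described above
   is roughly (an illustrative sketch, not the libgcc source) an empty
   hook whose only purpose is to give us a stable address to put a
   breakpoint on:

     void
     _Unwind_DebugHook (void *cfa, void *handler)
     {
       ... deliberately does nothing ...
     }

   The unwinder calls it just before transferring control to HANDLER in
   the frame identified by CFA.  */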
5439
5440 static void
5441 stop_stepping (struct execution_control_state *ecs)
5442 {
5443 if (debug_infrun)
5444 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5445
5446 /* Let callers know we don't want to wait for the inferior anymore. */
5447 ecs->wait_some_more = 0;
5448 }
5449
5450 /* This function handles various cases where we need to continue
5451 waiting for the inferior. */
5452 /* (Used to be the keep_going: label in the old wait_for_inferior). */
5453
5454 static void
5455 keep_going (struct execution_control_state *ecs)
5456 {
5457 /* Make sure normal_stop is called if we get a QUIT handled before
5458 reaching resume. */
5459 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5460
5461 /* Save the pc before execution, to compare with pc after stop. */
5462 ecs->event_thread->prev_pc
5463 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5464
5465 /* If we get here, we should keep running the inferior and not return
5466 to the debugger.  */
5467
5468 if (ecs->event_thread->control.trap_expected
5469 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
5470 {
5471 /* We took a signal (which we are supposed to pass through to
5472 the inferior, else we'd not get here) and we haven't yet
5473 gotten our trap. Simply continue. */
5474
5475 discard_cleanups (old_cleanups);
5476 resume (currently_stepping (ecs->event_thread),
5477 ecs->event_thread->suspend.stop_signal);
5478 }
5479 else
5480 {
5481 /* Either the trap was not expected, but we are continuing
5482 anyway (the user asked that this signal be passed to the
5483 child)
5484 -- or --
5485 The signal was SIGTRAP, e.g. it was our signal, but we
5486 decided we should resume from it.
5487
5488 We're going to run this baby now!
5489
5490 Note that insert_breakpoints won't try to re-insert
5491 already inserted breakpoints. Therefore, we don't
5492 care if breakpoints were already inserted, or not. */
5493
5494 if (ecs->event_thread->stepping_over_breakpoint)
5495 {
5496 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5497
5498 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5499 /* Since we can't do a displaced step, we have to remove
5500 the breakpoint while we step it. To keep things
5501 simple, we remove them all. */
5502 remove_breakpoints ();
5503 }
5504 else
5505 {
5506 struct gdb_exception e;
5507
5508 /* Stop stepping when inserting breakpoints
5509 has failed. */
5510 TRY_CATCH (e, RETURN_MASK_ERROR)
5511 {
5512 insert_breakpoints ();
5513 }
5514 if (e.reason < 0)
5515 {
5516 exception_print (gdb_stderr, e);
5517 stop_stepping (ecs);
5518 return;
5519 }
5520 }
5521
5522 ecs->event_thread->control.trap_expected
5523 = ecs->event_thread->stepping_over_breakpoint;
5524
5525 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5526 specifies that such a signal should be delivered to the
5527 target program).
5528
5529 Typically, this would occur when a user is debugging a
5530 target monitor on a simulator: the target monitor sets a
5531 breakpoint; the simulator encounters this break-point and
5532 halts the simulation, handing control to GDB; GDB, noting
5533 that the break-point isn't valid, returns control back to the
5534 simulator; the simulator then delivers the hardware
5535 equivalent of a SIGNAL_TRAP to the program being debugged. */
5536
5537 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
5538 && !signal_program[ecs->event_thread->suspend.stop_signal])
5539 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
5540
5541 discard_cleanups (old_cleanups);
5542 resume (currently_stepping (ecs->event_thread),
5543 ecs->event_thread->suspend.stop_signal);
5544 }
5545
5546 prepare_to_wait (ecs);
5547 }
5548
5549 /* This function normally comes after a resume, before
5550 handle_inferior_event exits. It takes care of any last bits of
5551 housekeeping, and sets the all-important wait_some_more flag. */
5552
5553 static void
5554 prepare_to_wait (struct execution_control_state *ecs)
5555 {
5556 if (debug_infrun)
5557 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5558
5559 /* This is the old end of the while loop. Let everybody know we
5560 want to wait for the inferior some more and get called again
5561 soon. */
5562 ecs->wait_some_more = 1;
5563 }
5564
5565 /* Several print_*_reason functions to print why the inferior has stopped.
5566 We always print something when the inferior exits, or receives a signal.
5567 The rest of the cases are dealt with later on in normal_stop and
5568 print_it_typical. Ideally there should be a call to one of these
5569 print_*_reason functions from handle_inferior_event each time
5570 stop_stepping is called. */
5571
5572 /* Print why the inferior has stopped.
5573 We are done with a step/next/si/ni command; print why the inferior has
5574 stopped.  For the CLI we print nothing here; for MI we emit the stop reason,
5575 but only if we are not in the middle of a "step n" operation for n > 1.  */
5576
5577 static void
5578 print_end_stepping_range_reason (void)
5579 {
5580 if ((!inferior_thread ()->step_multi
5581 || !inferior_thread ()->control.stop_step)
5582 && ui_out_is_mi_like_p (uiout))
5583 ui_out_field_string (uiout, "reason",
5584 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5585 }
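/* When the MI interpreter is in use, the field emitted above ends up in
   the async *stopped record, along the lines of (illustrative output;
   the exact set of fields depends on the stop):

     *stopped,reason="end-stepping-range",thread-id="1",frame={...}

   CLI output for the same stop is produced separately, via normal_stop
   and print_it_typical.  */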
5586
5587 /* The inferior was terminated by a signal, print why it stopped. */
5588
5589 static void
5590 print_signal_exited_reason (enum target_signal siggnal)
5591 {
5592 annotate_signalled ();
5593 if (ui_out_is_mi_like_p (uiout))
5594 ui_out_field_string
5595 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5596 ui_out_text (uiout, "\nProgram terminated with signal ");
5597 annotate_signal_name ();
5598 ui_out_field_string (uiout, "signal-name",
5599 target_signal_to_name (siggnal));
5600 annotate_signal_name_end ();
5601 ui_out_text (uiout, ", ");
5602 annotate_signal_string ();
5603 ui_out_field_string (uiout, "signal-meaning",
5604 target_signal_to_string (siggnal));
5605 annotate_signal_string_end ();
5606 ui_out_text (uiout, ".\n");
5607 ui_out_text (uiout, "The program no longer exists.\n");
5608 }
5609
5610 /* The inferior program is finished, print why it stopped. */
5611
5612 static void
5613 print_exited_reason (int exitstatus)
5614 {
5615 struct inferior *inf = current_inferior ();
5616 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5617
5618 annotate_exited (exitstatus);
5619 if (exitstatus)
5620 {
5621 if (ui_out_is_mi_like_p (uiout))
5622 ui_out_field_string (uiout, "reason",
5623 async_reason_lookup (EXEC_ASYNC_EXITED));
5624 ui_out_text (uiout, "[Inferior ");
5625 ui_out_text (uiout, plongest (inf->num));
5626 ui_out_text (uiout, " (");
5627 ui_out_text (uiout, pidstr);
5628 ui_out_text (uiout, ") exited with code ");
5629 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5630 ui_out_text (uiout, "]\n");
5631 }
5632 else
5633 {
5634 if (ui_out_is_mi_like_p (uiout))
5635 ui_out_field_string
5636 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5637 ui_out_text (uiout, "[Inferior ");
5638 ui_out_text (uiout, plongest (inf->num));
5639 ui_out_text (uiout, " (");
5640 ui_out_text (uiout, pidstr);
5641 ui_out_text (uiout, ") exited normally]\n");
5642 }
5643 /* Support the --return-child-result option. */
5644 return_child_result_value = exitstatus;
5645 }
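/* Sample CLI output from the code above (illustrative values; note that
   a nonzero exit status is printed in octal, per the "0%o" format):

     [Inferior 1 (process 1234) exited with code 01]
     [Inferior 1 (process 1234) exited normally]

   The MI "reason" field distinguishes "exited" from "exited-normally".  */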
5646
5647 /* Signal received, print why the inferior has stopped. The signal table
5648 tells us to print about it. */
5649
5650 static void
5651 print_signal_received_reason (enum target_signal siggnal)
5652 {
5653 annotate_signal ();
5654
5655 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5656 {
5657 struct thread_info *t = inferior_thread ();
5658
5659 ui_out_text (uiout, "\n[");
5660 ui_out_field_string (uiout, "thread-name",
5661 target_pid_to_str (t->ptid));
5662 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5663 ui_out_text (uiout, " stopped");
5664 }
5665 else
5666 {
5667 ui_out_text (uiout, "\nProgram received signal ");
5668 annotate_signal_name ();
5669 if (ui_out_is_mi_like_p (uiout))
5670 ui_out_field_string
5671 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5672 ui_out_field_string (uiout, "signal-name",
5673 target_signal_to_name (siggnal));
5674 annotate_signal_name_end ();
5675 ui_out_text (uiout, ", ");
5676 annotate_signal_string ();
5677 ui_out_field_string (uiout, "signal-meaning",
5678 target_signal_to_string (siggnal));
5679 annotate_signal_string_end ();
5680 }
5681 ui_out_text (uiout, ".\n");
5682 }
5683
5684 /* Reverse execution: target ran out of history info, print why the inferior
5685 has stopped. */
5686
5687 static void
5688 print_no_history_reason (void)
5689 {
5690 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5691 }
5692
5693 /* Here to return control to GDB when the inferior stops for real.
5694 Print appropriate messages, remove breakpoints, give terminal our modes.
5695
5696 STOP_PRINT_FRAME nonzero means print the executing frame
5697 (pc, function, args, file, line number and line text).
5698 BREAKPOINTS_FAILED nonzero means stop was due to error
5699 attempting to insert breakpoints. */
5700
5701 void
5702 normal_stop (void)
5703 {
5704 struct target_waitstatus last;
5705 ptid_t last_ptid;
5706 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5707
5708 get_last_target_status (&last_ptid, &last);
5709
5710 /* If an exception is thrown from this point on, make sure to
5711 propagate GDB's knowledge of the executing state to the
5712 frontend/user running state. A QUIT is an easy exception to see
5713 here, so do this before any filtered output. */
5714 if (!non_stop)
5715 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5716 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5717 && last.kind != TARGET_WAITKIND_EXITED)
5718 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5719
5720 /* In non-stop mode, we don't want GDB to switch threads behind the
5721 user's back, to avoid races where the user is typing a command to
5722 apply to thread x, but GDB switches to thread y before the user
5723 finishes entering the command. */
5724
5725 /* As with the notification of thread events, we want to delay
5726 notifying the user that we've switched thread context until
5727 the inferior actually stops.
5728
5729 There's no point in saying anything if the inferior has exited.
5730 Note that SIGNALLED here means "exited with a signal", not
5731 "received a signal". */
5732 if (!non_stop
5733 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5734 && target_has_execution
5735 && last.kind != TARGET_WAITKIND_SIGNALLED
5736 && last.kind != TARGET_WAITKIND_EXITED)
5737 {
5738 target_terminal_ours_for_output ();
5739 printf_filtered (_("[Switching to %s]\n"),
5740 target_pid_to_str (inferior_ptid));
5741 annotate_thread_changed ();
5742 previous_inferior_ptid = inferior_ptid;
5743 }
5744
5745 if (!breakpoints_always_inserted_mode () && target_has_execution)
5746 {
5747 if (remove_breakpoints ())
5748 {
5749 target_terminal_ours_for_output ();
5750 printf_filtered (_("Cannot remove breakpoints because "
5751 "program is no longer writable.\nFurther "
5752 "execution is probably impossible.\n"));
5753 }
5754 }
5755
5756 /* If an auto-display called a function and that got a signal,
5757 delete that auto-display to avoid an infinite recursion. */
5758
5759 if (stopped_by_random_signal)
5760 disable_current_display ();
5761
5762 /* Don't print a message if in the middle of doing a "step n"
5763 operation for n > 1 */
5764 if (target_has_execution
5765 && last.kind != TARGET_WAITKIND_SIGNALLED
5766 && last.kind != TARGET_WAITKIND_EXITED
5767 && inferior_thread ()->step_multi
5768 && inferior_thread ()->control.stop_step)
5769 goto done;
5770
5771 target_terminal_ours ();
5772
5773 /* Set the current source location. This will also happen if we
5774 display the frame below, but the current SAL will be incorrect
5775 during a user hook-stop function. */
5776 if (has_stack_frames () && !stop_stack_dummy)
5777 set_current_sal_from_frame (get_current_frame (), 1);
5778
5779 /* Let the user/frontend see the threads as stopped. */
5780 do_cleanups (old_chain);
5781
5782 /* Look up the hook_stop and run it (CLI internally handles problem
5783 of stop_command's pre-hook not existing). */
5784 if (stop_command)
5785 catch_errors (hook_stop_stub, stop_command,
5786 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5787
5788 if (!has_stack_frames ())
5789 goto done;
5790
5791 if (last.kind == TARGET_WAITKIND_SIGNALLED
5792 || last.kind == TARGET_WAITKIND_EXITED)
5793 goto done;
5794
5795 /* Select innermost stack frame - i.e., current frame is frame 0,
5796 and current location is based on that.
5797 Don't do this on return from a stack dummy routine,
5798 or if the program has exited. */
5799
5800 if (!stop_stack_dummy)
5801 {
5802 select_frame (get_current_frame ());
5803
5804 /* Print current location without a level number, if
5805 we have changed functions or hit a breakpoint.
5806 Print source line if we have one.
5807 bpstat_print() contains the logic deciding in detail
5808 what to print, based on the event(s) that just occurred. */
5809
5810 /* If --batch-silent is enabled then there's no need to print the current
5811 source location, and trying to do so risks an error message about
5812 missing source files. */
5813 if (stop_print_frame && !batch_silent)
5814 {
5815 int bpstat_ret;
5816 int source_flag;
5817 int do_frame_printing = 1;
5818 struct thread_info *tp = inferior_thread ();
5819
5820 bpstat_ret = bpstat_print (tp->control.stop_bpstat);
5821 switch (bpstat_ret)
5822 {
5823 case PRINT_UNKNOWN:
5824 /* If we had hit a shared library event breakpoint,
5825 bpstat_print would print out this message. If we hit
5826 an OS-level shared library event, do the same
5827 thing. */
5828 if (last.kind == TARGET_WAITKIND_LOADED)
5829 {
5830 printf_filtered (_("Stopped due to shared library event\n"));
5831 source_flag = SRC_LINE; /* something bogus */
5832 do_frame_printing = 0;
5833 break;
5834 }
5835
5836 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5837 (or should) carry around the function and does (or
5838 should) use that when doing a frame comparison. */
5839 if (tp->control.stop_step
5840 && frame_id_eq (tp->control.step_frame_id,
5841 get_frame_id (get_current_frame ()))
5842 && step_start_function == find_pc_function (stop_pc))
5843 source_flag = SRC_LINE; /* Finished step, just
5844 print source line. */
5845 else
5846 source_flag = SRC_AND_LOC; /* Print location and
5847 source line. */
5848 break;
5849 case PRINT_SRC_AND_LOC:
5850 source_flag = SRC_AND_LOC; /* Print location and
5851 source line. */
5852 break;
5853 case PRINT_SRC_ONLY:
5854 source_flag = SRC_LINE;
5855 break;
5856 case PRINT_NOTHING:
5857 source_flag = SRC_LINE; /* something bogus */
5858 do_frame_printing = 0;
5859 break;
5860 default:
5861 internal_error (__FILE__, __LINE__, _("Unknown value."));
5862 }
5863
5864 /* The behavior of this routine with respect to the source
5865 flag is:
5866 SRC_LINE: Print only source line
5867 LOCATION: Print only location
5868 SRC_AND_LOC: Print location and source line. */
5869 if (do_frame_printing)
5870 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5871
5872 /* Display the auto-display expressions. */
5873 do_displays ();
5874 }
5875 }
5876
5877 /* Save the function value return registers, if we care.
5878 We might be about to restore their previous contents. */
5879 if (inferior_thread ()->control.proceed_to_finish
5880 && execution_direction != EXEC_REVERSE)
5881 {
5882 /* This should not be necessary. */
5883 if (stop_registers)
5884 regcache_xfree (stop_registers);
5885
5886 /* NB: The copy goes through to the target picking up the value of
5887 all the registers. */
5888 stop_registers = regcache_dup (get_current_regcache ());
5889 }
5890
5891 if (stop_stack_dummy == STOP_STACK_DUMMY)
5892 {
5893 /* Pop the empty frame that contains the stack dummy.
5894 This also restores inferior state prior to the call
5895 (struct infcall_suspend_state). */
5896 struct frame_info *frame = get_current_frame ();
5897
5898 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5899 frame_pop (frame);
5900 /* frame_pop() calls reinit_frame_cache as the last thing it
5901 does which means there's currently no selected frame. We
5902 don't need to re-establish a selected frame if the dummy call
5903 returns normally, that will be done by
5904 restore_infcall_control_state. However, we do have to handle
5905 the case where the dummy call is returning after being
5906 stopped (e.g. the dummy call previously hit a breakpoint).
5907 We can't know which case we have so just always re-establish
5908 a selected frame here. */
5909 select_frame (get_current_frame ());
5910 }
5911
5912 done:
5913 annotate_stopped ();
5914
5915 /* Suppress the stop observer if we're in the middle of:
5916
5917 - a step n (n > 1), as there are still more steps to be done.
5918
5919 - a "finish" command, as the observer will be called in
5920 finish_command_continuation, so it can include the inferior
5921 function's return value.
5922
5923 - calling an inferior function, as we pretend the inferior didn't
5924 run at all. The return value of the call is handled by the
5925 expression evaluator, through call_function_by_hand. */
5926
5927 if (!target_has_execution
5928 || last.kind == TARGET_WAITKIND_SIGNALLED
5929 || last.kind == TARGET_WAITKIND_EXITED
5930 || (!inferior_thread ()->step_multi
5931 && !(inferior_thread ()->control.stop_bpstat
5932 && inferior_thread ()->control.proceed_to_finish)
5933 && !inferior_thread ()->control.in_infcall))
5934 {
5935 if (!ptid_equal (inferior_ptid, null_ptid))
5936 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
5937 stop_print_frame);
5938 else
5939 observer_notify_normal_stop (NULL, stop_print_frame);
5940 }
5941
5942 if (target_has_execution)
5943 {
5944 if (last.kind != TARGET_WAITKIND_SIGNALLED
5945 && last.kind != TARGET_WAITKIND_EXITED)
5946 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5947 Delete any breakpoint that is to be deleted at the next stop. */
5948 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
5949 }
5950
5951 /* Try to get rid of automatically added inferiors that are no
5952 longer needed. Keeping those around slows down things linearly.
5953 Note that this never removes the current inferior. */
5954 prune_inferiors ();
5955 }
5956
5957 static int
5958 hook_stop_stub (void *cmd)
5959 {
5960 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5961 return (0);
5962 }
5963 \f
5964 int
5965 signal_stop_state (int signo)
5966 {
5967 return signal_stop[signo];
5968 }
5969
5970 int
5971 signal_print_state (int signo)
5972 {
5973 return signal_print[signo];
5974 }
5975
5976 int
5977 signal_pass_state (int signo)
5978 {
5979 return signal_program[signo];
5980 }
5981
5982 static void
5983 signal_cache_update (int signo)
5984 {
5985 if (signo == -1)
5986 {
5987 for (signo = 0; signo < (int) TARGET_SIGNAL_LAST; signo++)
5988 signal_cache_update (signo);
5989
5990 return;
5991 }
5992
5993 signal_pass[signo] = (signal_stop[signo] == 0
5994 && signal_print[signo] == 0
5995 && signal_program[signo] == 1);
5996 }
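/* In other words (a small worked example of the rule above): a signal is
   "passed quietly" -- handed straight to the inferior without GDB
   stopping or printing -- only when all three user-visible settings
   agree:

     stop   print   pass to program   =>  signal_pass[signo]
      No     No          Yes                    1
      (any other combination)                   0

   signal_pass[] is the array later handed to target_pass_signals, so the
   target can deliver such signals without reporting them to GDB.  */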
5997
5998 int
5999 signal_stop_update (int signo, int state)
6000 {
6001 int ret = signal_stop[signo];
6002
6003 signal_stop[signo] = state;
6004 signal_cache_update (signo);
6005 return ret;
6006 }
6007
6008 int
6009 signal_print_update (int signo, int state)
6010 {
6011 int ret = signal_print[signo];
6012
6013 signal_print[signo] = state;
6014 signal_cache_update (signo);
6015 return ret;
6016 }
6017
6018 int
6019 signal_pass_update (int signo, int state)
6020 {
6021 int ret = signal_program[signo];
6022
6023 signal_program[signo] = state;
6024 signal_cache_update (signo);
6025 return ret;
6026 }
6027
6028 static void
6029 sig_print_header (void)
6030 {
6031 printf_filtered (_("Signal Stop\tPrint\tPass "
6032 "to program\tDescription\n"));
6033 }
6034
6035 static void
6036 sig_print_info (enum target_signal oursig)
6037 {
6038 const char *name = target_signal_to_name (oursig);
6039 int name_padding = 13 - strlen (name);
6040
6041 if (name_padding <= 0)
6042 name_padding = 0;
6043
6044 printf_filtered ("%s", name);
6045 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6046 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6047 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6048 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6049 printf_filtered ("%s\n", target_signal_to_string (oursig));
6050 }
6051
6052 /* Specify how various signals in the inferior should be handled. */
6053
6054 static void
6055 handle_command (char *args, int from_tty)
6056 {
6057 char **argv;
6058 int digits, wordlen;
6059 int sigfirst, signum, siglast;
6060 enum target_signal oursig;
6061 int allsigs;
6062 int nsigs;
6063 unsigned char *sigs;
6064 struct cleanup *old_chain;
6065
6066 if (args == NULL)
6067 {
6068 error_no_arg (_("signal to handle"));
6069 }
6070
6071 /* Allocate and zero an array of flags for which signals to handle. */
6072
6073 nsigs = (int) TARGET_SIGNAL_LAST;
6074 sigs = (unsigned char *) alloca (nsigs);
6075 memset (sigs, 0, nsigs);
6076
6077 /* Break the command line up into args. */
6078
6079 argv = gdb_buildargv (args);
6080 old_chain = make_cleanup_freeargv (argv);
6081
6082 /* Walk through the args, looking for signal numbers, signal names, and
6083 actions. Signal numbers and signal names may be interspersed with
6084 actions, with the actions being performed for all signals cumulatively
6085 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
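/* For example (illustrative invocations accepted by this syntax):

     handle SIGUSR1 nostop noprint pass
     handle 14-17 stop print
     handle all nopass

   Note that bare numbers such as "14-17" are interpreted against GDB's
   own signal numbering (see the comment further below), not the
   host's.  */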
6086
6087 while (*argv != NULL)
6088 {
6089 wordlen = strlen (*argv);
6090 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6091 {;
6092 }
6093 allsigs = 0;
6094 sigfirst = siglast = -1;
6095
6096 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6097 {
6098 /* Apply action to all signals except those used by the
6099 debugger. Silently skip those. */
6100 allsigs = 1;
6101 sigfirst = 0;
6102 siglast = nsigs - 1;
6103 }
6104 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6105 {
6106 SET_SIGS (nsigs, sigs, signal_stop);
6107 SET_SIGS (nsigs, sigs, signal_print);
6108 }
6109 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6110 {
6111 UNSET_SIGS (nsigs, sigs, signal_program);
6112 }
6113 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6114 {
6115 SET_SIGS (nsigs, sigs, signal_print);
6116 }
6117 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6118 {
6119 SET_SIGS (nsigs, sigs, signal_program);
6120 }
6121 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6122 {
6123 UNSET_SIGS (nsigs, sigs, signal_stop);
6124 }
6125 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6126 {
6127 SET_SIGS (nsigs, sigs, signal_program);
6128 }
6129 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6130 {
6131 UNSET_SIGS (nsigs, sigs, signal_print);
6132 UNSET_SIGS (nsigs, sigs, signal_stop);
6133 }
6134 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6135 {
6136 UNSET_SIGS (nsigs, sigs, signal_program);
6137 }
6138 else if (digits > 0)
6139 {
6140 /* It is numeric. The numeric signal refers to our own
6141 internal signal numbering from target.h, not to host/target
6142 signal number. This is a feature; users really should be
6143 using symbolic names anyway, and the common ones like
6144 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6145
6146 sigfirst = siglast = (int)
6147 target_signal_from_command (atoi (*argv));
6148 if ((*argv)[digits] == '-')
6149 {
6150 siglast = (int)
6151 target_signal_from_command (atoi ((*argv) + digits + 1));
6152 }
6153 if (sigfirst > siglast)
6154 {
6155 /* Bet he didn't figure we'd think of this case... */
6156 signum = sigfirst;
6157 sigfirst = siglast;
6158 siglast = signum;
6159 }
6160 }
6161 else
6162 {
6163 oursig = target_signal_from_name (*argv);
6164 if (oursig != TARGET_SIGNAL_UNKNOWN)
6165 {
6166 sigfirst = siglast = (int) oursig;
6167 }
6168 else
6169 {
6170 /* Not a number and not a recognized flag word => complain. */
6171 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6172 }
6173 }
6174
6175 /* If any signal numbers or symbol names were found, set flags for
6176 which signals to apply actions to. */
6177
6178 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6179 {
6180 switch ((enum target_signal) signum)
6181 {
6182 case TARGET_SIGNAL_TRAP:
6183 case TARGET_SIGNAL_INT:
6184 if (!allsigs && !sigs[signum])
6185 {
6186 if (query (_("%s is used by the debugger.\n\
6187 Are you sure you want to change it? "),
6188 target_signal_to_name ((enum target_signal) signum)))
6189 {
6190 sigs[signum] = 1;
6191 }
6192 else
6193 {
6194 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6195 gdb_flush (gdb_stdout);
6196 }
6197 }
6198 break;
6199 case TARGET_SIGNAL_0:
6200 case TARGET_SIGNAL_DEFAULT:
6201 case TARGET_SIGNAL_UNKNOWN:
6202 /* Make sure that "all" doesn't print these. */
6203 break;
6204 default:
6205 sigs[signum] = 1;
6206 break;
6207 }
6208 }
6209
6210 argv++;
6211 }
6212
6213 for (signum = 0; signum < nsigs; signum++)
6214 if (sigs[signum])
6215 {
6216 signal_cache_update (-1);
6217 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
6218
6219 if (from_tty)
6220 {
6221 /* Show the results. */
6222 sig_print_header ();
6223 for (; signum < nsigs; signum++)
6224 if (sigs[signum])
6225 sig_print_info (signum);
6226 }
6227
6228 break;
6229 }
6230
6231 do_cleanups (old_chain);
6232 }
6233
6234 static void
6235 xdb_handle_command (char *args, int from_tty)
6236 {
6237 char **argv;
6238 struct cleanup *old_chain;
6239
6240 if (args == NULL)
6241 error_no_arg (_("xdb command"));
6242
6243 /* Break the command line up into args. */
6244
6245 argv = gdb_buildargv (args);
6246 old_chain = make_cleanup_freeargv (argv);
6247 if (argv[1] != (char *) NULL)
6248 {
6249 char *argBuf;
6250 int bufLen;
6251
6252 bufLen = strlen (argv[0]) + 20;
6253 argBuf = (char *) xmalloc (bufLen);
6254 if (argBuf)
6255 {
6256 int validFlag = 1;
6257 enum target_signal oursig;
6258
6259 oursig = target_signal_from_name (argv[0]);
6260 memset (argBuf, 0, bufLen);
6261 if (strcmp (argv[1], "Q") == 0)
6262 sprintf (argBuf, "%s %s", argv[0], "noprint");
6263 else
6264 {
6265 if (strcmp (argv[1], "s") == 0)
6266 {
6267 if (!signal_stop[oursig])
6268 sprintf (argBuf, "%s %s", argv[0], "stop");
6269 else
6270 sprintf (argBuf, "%s %s", argv[0], "nostop");
6271 }
6272 else if (strcmp (argv[1], "i") == 0)
6273 {
6274 if (!signal_program[oursig])
6275 sprintf (argBuf, "%s %s", argv[0], "pass");
6276 else
6277 sprintf (argBuf, "%s %s", argv[0], "nopass");
6278 }
6279 else if (strcmp (argv[1], "r") == 0)
6280 {
6281 if (!signal_print[oursig])
6282 sprintf (argBuf, "%s %s", argv[0], "print");
6283 else
6284 sprintf (argBuf, "%s %s", argv[0], "noprint");
6285 }
6286 else
6287 validFlag = 0;
6288 }
6289 if (validFlag)
6290 handle_command (argBuf, from_tty);
6291 else
6292 printf_filtered (_("Invalid signal handling flag.\n"));
6293 if (argBuf)
6294 xfree (argBuf);
6295 }
6296 }
6297 do_cleanups (old_chain);
6298 }
6299
6300 /* Print current contents of the tables set by the handle command.
6301 It is possible we should just be printing signals actually used
6302 by the current target (but for things to work right when switching
6303 targets, all signals should be in the signal tables). */
6304
6305 static void
6306 signals_info (char *signum_exp, int from_tty)
6307 {
6308 enum target_signal oursig;
6309
6310 sig_print_header ();
6311
6312 if (signum_exp)
6313 {
6314 /* First see if this is a symbol name. */
6315 oursig = target_signal_from_name (signum_exp);
6316 if (oursig == TARGET_SIGNAL_UNKNOWN)
6317 {
6318 /* No, try numeric. */
6319 oursig =
6320 target_signal_from_command (parse_and_eval_long (signum_exp));
6321 }
6322 sig_print_info (oursig);
6323 return;
6324 }
6325
6326 printf_filtered ("\n");
6327 /* These ugly casts brought to you by the native VAX compiler. */
6328 for (oursig = TARGET_SIGNAL_FIRST;
6329 (int) oursig < (int) TARGET_SIGNAL_LAST;
6330 oursig = (enum target_signal) ((int) oursig + 1))
6331 {
6332 QUIT;
6333
6334 if (oursig != TARGET_SIGNAL_UNKNOWN
6335 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
6336 sig_print_info (oursig);
6337 }
6338
6339 printf_filtered (_("\nUse the \"handle\" command "
6340 "to change these tables.\n"));
6341 }
6342
6343 /* The $_siginfo convenience variable is a bit special. We don't know
6344 for sure the type of the value until we actually have a chance to
6345 fetch the data. The type can change depending on gdbarch, so it is
6346 also dependent on which thread you have selected.  We cope with this by:
6347
6348 1. making $_siginfo be an internalvar that creates a new value on
6349 access.
6350
6351 2. making the value of $_siginfo be an lval_computed value. */
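/* From the user's point of view the result looks like, e.g.
   (illustrative session on a GNU/Linux target; the member names come
   from the gdbarch-provided siginfo type and so vary between targets):

     (gdb) print $_siginfo.si_signo
     $1 = 11
     (gdb) print $_siginfo._sifields._sigfault.si_addr
     $2 = (void *) 0x0

   Reading and writing the value goes through the lval_computed hooks
   below.  */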
6352
6353 /* This function implements the lval_computed support for reading a
6354 $_siginfo value. */
6355
6356 static void
6357 siginfo_value_read (struct value *v)
6358 {
6359 LONGEST transferred;
6360
6361 transferred =
6362 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6363 NULL,
6364 value_contents_all_raw (v),
6365 value_offset (v),
6366 TYPE_LENGTH (value_type (v)));
6367
6368 if (transferred != TYPE_LENGTH (value_type (v)))
6369 error (_("Unable to read siginfo"));
6370 }
6371
6372 /* This function implements the lval_computed support for writing a
6373 $_siginfo value. */
6374
6375 static void
6376 siginfo_value_write (struct value *v, struct value *fromval)
6377 {
6378 LONGEST transferred;
6379
6380 transferred = target_write (&current_target,
6381 TARGET_OBJECT_SIGNAL_INFO,
6382 NULL,
6383 value_contents_all_raw (fromval),
6384 value_offset (v),
6385 TYPE_LENGTH (value_type (fromval)));
6386
6387 if (transferred != TYPE_LENGTH (value_type (fromval)))
6388 error (_("Unable to write siginfo"));
6389 }
6390
6391 static struct lval_funcs siginfo_value_funcs =
6392 {
6393 siginfo_value_read,
6394 siginfo_value_write
6395 };
6396
6397 /* Return a new value with the correct type for the siginfo object of
6398 the current thread using architecture GDBARCH. Return a void value
6399 if there's no object available. */
6400
6401 static struct value *
6402 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6403 {
6404 if (target_has_stack
6405 && !ptid_equal (inferior_ptid, null_ptid)
6406 && gdbarch_get_siginfo_type_p (gdbarch))
6407 {
6408 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6409
6410 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6411 }
6412
6413 return allocate_value (builtin_type (gdbarch)->builtin_void);
6414 }
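/* The convenience variable itself is registered during initialization,
   roughly as follows (see _initialize_infrun at the end of this file):

     create_internalvar_type_lazy ("_siginfo", siginfo_make_value);

   so both the value and its type are recomputed lazily on each access,
   using whichever thread and gdbarch are current at that point.  */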
6415
6416 \f
6417 /* infcall_suspend_state contains state about the program itself like its
6418 registers and any signal it received when it last stopped.
6419 This state must be restored regardless of how the inferior function call
6420 ends (either successfully, or after it hits a breakpoint or signal)
6421 if the program is to properly continue where it left off. */
6422
6423 struct infcall_suspend_state
6424 {
6425 struct thread_suspend_state thread_suspend;
6426 struct inferior_suspend_state inferior_suspend;
6427
6428 /* Other fields: */
6429 CORE_ADDR stop_pc;
6430 struct regcache *registers;
6431
6432 /* Format of SIGINFO_DATA or NULL if it is not present. */
6433 struct gdbarch *siginfo_gdbarch;
6434
6435 /* The format of the siginfo data depends on SIGINFO_GDBARCH and it has
6436 a length of TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For a different
6437 gdbarch the content would be invalid.  */
6438 gdb_byte *siginfo_data;
6439 };
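/* Typical (illustrative) usage pattern for this state, as seen from the
   inferior-function-call code in infcall.c:

     struct infcall_suspend_state *st = save_infcall_suspend_state ();
     struct cleanup *old = make_cleanup_restore_infcall_suspend_state (st);
     ... run the inferior function ...
     do_cleanups (old);   -- restores registers, stop signal and siginfo

   This is only a sketch of the intended use; the real call sites also
   deal with the infcall_control_state defined further below.  */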
6440
6441 struct infcall_suspend_state *
6442 save_infcall_suspend_state (void)
6443 {
6444 struct infcall_suspend_state *inf_state;
6445 struct thread_info *tp = inferior_thread ();
6446 struct inferior *inf = current_inferior ();
6447 struct regcache *regcache = get_current_regcache ();
6448 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6449 gdb_byte *siginfo_data = NULL;
6450
6451 if (gdbarch_get_siginfo_type_p (gdbarch))
6452 {
6453 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6454 size_t len = TYPE_LENGTH (type);
6455 struct cleanup *back_to;
6456
6457 siginfo_data = xmalloc (len);
6458 back_to = make_cleanup (xfree, siginfo_data);
6459
6460 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6461 siginfo_data, 0, len) == len)
6462 discard_cleanups (back_to);
6463 else
6464 {
6465 /* Errors ignored. */
6466 do_cleanups (back_to);
6467 siginfo_data = NULL;
6468 }
6469 }
6470
6471 inf_state = XZALLOC (struct infcall_suspend_state);
6472
6473 if (siginfo_data)
6474 {
6475 inf_state->siginfo_gdbarch = gdbarch;
6476 inf_state->siginfo_data = siginfo_data;
6477 }
6478
6479 inf_state->thread_suspend = tp->suspend;
6480 inf_state->inferior_suspend = inf->suspend;
6481
6482 /* run_inferior_call will not use the signal due to its `proceed' call with
6483 TARGET_SIGNAL_0 anyway. */
6484 tp->suspend.stop_signal = TARGET_SIGNAL_0;
6485
6486 inf_state->stop_pc = stop_pc;
6487
6488 inf_state->registers = regcache_dup (regcache);
6489
6490 return inf_state;
6491 }
6492
6493 /* Restore inferior session state to INF_STATE. */
6494
6495 void
6496 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6497 {
6498 struct thread_info *tp = inferior_thread ();
6499 struct inferior *inf = current_inferior ();
6500 struct regcache *regcache = get_current_regcache ();
6501 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6502
6503 tp->suspend = inf_state->thread_suspend;
6504 inf->suspend = inf_state->inferior_suspend;
6505
6506 stop_pc = inf_state->stop_pc;
6507
6508 if (inf_state->siginfo_gdbarch == gdbarch)
6509 {
6510 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6511 size_t len = TYPE_LENGTH (type);
6512
6513 /* Errors ignored. */
6514 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6515 inf_state->siginfo_data, 0, len);
6516 }
6517
6518 /* The inferior can be gone if the user types "print exit(0)"
6519 (and perhaps other times). */
6520 if (target_has_execution)
6521 /* NB: The register write goes through to the target. */
6522 regcache_cpy (regcache, inf_state->registers);
6523
6524 discard_infcall_suspend_state (inf_state);
6525 }
6526
6527 static void
6528 do_restore_infcall_suspend_state_cleanup (void *state)
6529 {
6530 restore_infcall_suspend_state (state);
6531 }
6532
6533 struct cleanup *
6534 make_cleanup_restore_infcall_suspend_state
6535 (struct infcall_suspend_state *inf_state)
6536 {
6537 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6538 }
6539
6540 void
6541 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6542 {
6543 regcache_xfree (inf_state->registers);
6544 xfree (inf_state->siginfo_data);
6545 xfree (inf_state);
6546 }
6547
6548 struct regcache *
6549 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6550 {
6551 return inf_state->registers;
6552 }
6553
6554 /* infcall_control_state contains state regarding gdb's control of the
6555 inferior itself like stepping control. It also contains session state like
6556 the user's currently selected frame. */
6557
6558 struct infcall_control_state
6559 {
6560 struct thread_control_state thread_control;
6561 struct inferior_control_state inferior_control;
6562
6563 /* Other fields: */
6564 enum stop_stack_kind stop_stack_dummy;
6565 int stopped_by_random_signal;
6566 int stop_after_trap;
6567
6568 /* ID of the selected frame when the inferior function call was made.  */
6569 struct frame_id selected_frame_id;
6570 };
6571
6572 /* Save all of the information associated with the inferior<==>gdb
6573 connection. */
6574
6575 struct infcall_control_state *
6576 save_infcall_control_state (void)
6577 {
6578 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6579 struct thread_info *tp = inferior_thread ();
6580 struct inferior *inf = current_inferior ();
6581
6582 inf_status->thread_control = tp->control;
6583 inf_status->inferior_control = inf->control;
6584
6585 tp->control.step_resume_breakpoint = NULL;
6586 tp->control.exception_resume_breakpoint = NULL;
6587
6588 /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
6589 chain. If caller's caller is walking the chain, they'll be happier if we
6590 hand them back the original chain when restore_infcall_control_state is
6591 called. */
6592 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6593
6594 /* Other fields: */
6595 inf_status->stop_stack_dummy = stop_stack_dummy;
6596 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6597 inf_status->stop_after_trap = stop_after_trap;
6598
6599 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6600
6601 return inf_status;
6602 }
6603
6604 static int
6605 restore_selected_frame (void *args)
6606 {
6607 struct frame_id *fid = (struct frame_id *) args;
6608 struct frame_info *frame;
6609
6610 frame = frame_find_by_id (*fid);
6611
6612 /* frame_find_by_id returns NULL if the previously selected frame
6613 no longer exists, e.g. because the stack was clobbered. */
6614 if (frame == NULL)
6615 {
6616 warning (_("Unable to restore previously selected frame."));
6617 return 0;
6618 }
6619
6620 select_frame (frame);
6621
6622 return (1);
6623 }
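
/* restore_selected_frame above follows the catch_errors callback
   protocol: it receives the frame_id through the void * argument and
   returns nonzero on success.  restore_infcall_control_state below
   runs it under catch_errors so that an error thrown while walking a
   possibly clobbered stack is turned into a zero return instead of
   propagating.  */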
6624
6625 /* Restore inferior session state to INF_STATUS. */
6626
6627 void
6628 restore_infcall_control_state (struct infcall_control_state *inf_status)
6629 {
6630 struct thread_info *tp = inferior_thread ();
6631 struct inferior *inf = current_inferior ();
6632
6633 if (tp->control.step_resume_breakpoint)
6634 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6635
6636 if (tp->control.exception_resume_breakpoint)
6637 tp->control.exception_resume_breakpoint->disposition
6638 = disp_del_at_next_stop;
6639
6640 /* Discard the copy of the bpstat chain that save_infcall_control_state installed; the original chain is put back just below. */
6641 bpstat_clear (&tp->control.stop_bpstat);
6642
6643 tp->control = inf_status->thread_control;
6644 inf->control = inf_status->inferior_control;
6645
6646 /* Other fields: */
6647 stop_stack_dummy = inf_status->stop_stack_dummy;
6648 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6649 stop_after_trap = inf_status->stop_after_trap;
6650
6651 if (target_has_stack)
6652 {
6653 /* The point of catch_errors is that if the stack is clobbered,
6654 walking the stack might encounter a garbage pointer and
6655 error() trying to dereference it. */
6656 if (catch_errors
6657 (restore_selected_frame, &inf_status->selected_frame_id,
6658 "Unable to restore previously selected frame:\n",
6659 RETURN_MASK_ERROR) == 0)
6660 /* Error in restoring the selected frame. Select the innermost
6661 frame. */
6662 select_frame (get_current_frame ());
6663 }
6664
6665 xfree (inf_status);
6666 }
6667
6668 static void
6669 do_restore_infcall_control_state_cleanup (void *sts)
6670 {
6671 restore_infcall_control_state (sts);
6672 }
6673
6674 struct cleanup *
6675 make_cleanup_restore_infcall_control_state
6676 (struct infcall_control_state *inf_status)
6677 {
6678 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
6679 }
6680
6681 void
6682 discard_infcall_control_state (struct infcall_control_state *inf_status)
6683 {
6684 if (inf_status->thread_control.step_resume_breakpoint)
6685 inf_status->thread_control.step_resume_breakpoint->disposition
6686 = disp_del_at_next_stop;
6687
6688 if (inf_status->thread_control.exception_resume_breakpoint)
6689 inf_status->thread_control.exception_resume_breakpoint->disposition
6690 = disp_del_at_next_stop;
6691
6692 /* See save_infcall_control_state for info on stop_bpstat. */
6693 bpstat_clear (&inf_status->thread_control.stop_bpstat);
6694
6695 xfree (inf_status);
6696 }
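
/* Illustrative sketch of the intended usage (an assumption, not code
   from this file): an inferior-call site typically pairs these
   routines with the suspend-state ones above:

     struct infcall_control_state *status = save_infcall_control_state ();
     struct cleanup *chain
       = make_cleanup_restore_infcall_control_state (status);
     ... make the call ...
     if (... the new stop state should be kept ...)
       {
         discard_cleanups (chain);
         discard_infcall_control_state (status);
       }
     else
       do_cleanups (chain);

   An error during the call thus unwinds GDB's stepping state, bpstat
   chain and selected frame, while a completed call may keep them.  */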
6697 \f
6698 int
6699 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6700 {
6701 struct target_waitstatus last;
6702 ptid_t last_ptid;
6703
6704 get_last_target_status (&last_ptid, &last);
6705
6706 if (last.kind != TARGET_WAITKIND_FORKED)
6707 return 0;
6708
6709 if (!ptid_equal (last_ptid, pid))
6710 return 0;
6711
6712 *child_pid = last.value.related_pid;
6713 return 1;
6714 }
6715
6716 int
6717 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6718 {
6719 struct target_waitstatus last;
6720 ptid_t last_ptid;
6721
6722 get_last_target_status (&last_ptid, &last);
6723
6724 if (last.kind != TARGET_WAITKIND_VFORKED)
6725 return 0;
6726
6727 if (!ptid_equal (last_ptid, pid))
6728 return 0;
6729
6730 *child_pid = last.value.related_pid;
6731 return 1;
6732 }
6733
6734 int
6735 inferior_has_execd (ptid_t pid, char **execd_pathname)
6736 {
6737 struct target_waitstatus last;
6738 ptid_t last_ptid;
6739
6740 get_last_target_status (&last_ptid, &last);
6741
6742 if (last.kind != TARGET_WAITKIND_EXECD)
6743 return 0;
6744
6745 if (!ptid_equal (last_ptid, pid))
6746 return 0;
6747
6748 *execd_pathname = xstrdup (last.value.execd_pathname);
6749 return 1;
6750 }
6751
6752 int
6753 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6754 {
6755 struct target_waitstatus last;
6756 ptid_t last_ptid;
6757
6758 get_last_target_status (&last_ptid, &last);
6759
6760 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6761 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6762 return 0;
6763
6764 if (!ptid_equal (last_ptid, pid))
6765 return 0;
6766
6767 *syscall_number = last.value.syscall_number;
6768 return 1;
6769 }
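
/* Illustrative example (not part of this file): after a stop, code
   that wants to know whether the last event was a fork can ask:

     ptid_t child;

     if (inferior_has_forked (inferior_ptid, &child))
       printf_filtered (_("forked child, pid %d\n"), ptid_get_pid (child));

   The vfork, exec and syscall variants above follow the same pattern
   against the last recorded target wait status.  */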
6770
6771 /* Oft-used ptids. */
6772 ptid_t null_ptid;
6773 ptid_t minus_one_ptid;
6774
6775 /* Create a ptid given the necessary PID, LWP, and TID components. */
6776
6777 ptid_t
6778 ptid_build (int pid, long lwp, long tid)
6779 {
6780 ptid_t ptid;
6781
6782 ptid.pid = pid;
6783 ptid.lwp = lwp;
6784 ptid.tid = tid;
6785 return ptid;
6786 }
6787
6788 /* Create a ptid from just a pid. */
6789
6790 ptid_t
6791 pid_to_ptid (int pid)
6792 {
6793 return ptid_build (pid, 0, 0);
6794 }
6795
6796 /* Fetch the pid (process id) component from a ptid. */
6797
6798 int
6799 ptid_get_pid (ptid_t ptid)
6800 {
6801 return ptid.pid;
6802 }
6803
6804 /* Fetch the lwp (lightweight process) component from a ptid. */
6805
6806 long
6807 ptid_get_lwp (ptid_t ptid)
6808 {
6809 return ptid.lwp;
6810 }
6811
6812 /* Fetch the tid (thread id) component from a ptid. */
6813
6814 long
6815 ptid_get_tid (ptid_t ptid)
6816 {
6817 return ptid.tid;
6818 }
6819
6820 /* ptid_equal() is used to test equality of two ptids. */
6821
6822 int
6823 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6824 {
6825 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6826 && ptid1.tid == ptid2.tid);
6827 }
6828
6829 /* Returns true if PTID represents a process. */
6830
6831 int
6832 ptid_is_pid (ptid_t ptid)
6833 {
6834 if (ptid_equal (minus_one_ptid, ptid))
6835 return 0;
6836 if (ptid_equal (null_ptid, ptid))
6837 return 0;
6838
6839 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6840 }
6841
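/* Return nonzero if PTID matches FILTER.  FILTER may be minus_one_ptid,
   which matches any ptid; a bare process ptid (ptid_is_pid), which
   matches every lwp and thread of that process; or a specific ptid,
   which matches only itself.  For example, with illustrative values:

     ptid_match (ptid_build (42, 7, 0), minus_one_ptid)         is 1
     ptid_match (ptid_build (42, 7, 0), pid_to_ptid (42))       is 1
     ptid_match (ptid_build (42, 7, 0), ptid_build (42, 8, 0))  is 0  */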
6842 int
6843 ptid_match (ptid_t ptid, ptid_t filter)
6844 {
6845 if (ptid_equal (filter, minus_one_ptid))
6846 return 1;
6847 if (ptid_is_pid (filter)
6848 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6849 return 1;
6850 else if (ptid_equal (ptid, filter))
6851 return 1;
6852
6853 return 0;
6854 }
6855
6856 /* restore_inferior_ptid() will be used by the cleanup machinery
6857 to restore the inferior_ptid value saved in a call to
6858 save_inferior_ptid(). */
6859
6860 static void
6861 restore_inferior_ptid (void *arg)
6862 {
6863 ptid_t *saved_ptid_ptr = arg;
6864
6865 inferior_ptid = *saved_ptid_ptr;
6866 xfree (arg);
6867 }
6868
6869 /* Save the value of inferior_ptid so that it may be restored by a
6870 later call to do_cleanups(). Returns the struct cleanup pointer
6871 needed for later doing the cleanup. */
6872
6873 struct cleanup *
6874 save_inferior_ptid (void)
6875 {
6876 ptid_t *saved_ptid_ptr;
6877
6878 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6879 *saved_ptid_ptr = inferior_ptid;
6880 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6881 }
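
/* Illustrative use (not part of this file): code that must temporarily
   switch inferior_ptid can rely on the cleanup to switch back even if
   an error is thrown:

     struct cleanup *old_chain = save_inferior_ptid ();

     inferior_ptid = some_other_ptid;
     ... operate on that thread ...
     do_cleanups (old_chain);

   where some_other_ptid stands for whatever ptid the caller needs.  */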
6882 \f
6883
6884 /* User interface for reverse debugging:
6885 Set exec-direction / show exec-direction commands
6886 (returns error unless the target supports reverse execution). */
6887
6888 int execution_direction = EXEC_FORWARD;
6889 static const char exec_forward[] = "forward";
6890 static const char exec_reverse[] = "reverse";
6891 static const char *exec_direction = exec_forward;
6892 static const char *exec_direction_names[] = {
6893 exec_forward,
6894 exec_reverse,
6895 NULL
6896 };
6897
6898 static void
6899 set_exec_direction_func (char *args, int from_tty,
6900 struct cmd_list_element *cmd)
6901 {
6902 if (target_can_execute_reverse)
6903 {
6904 if (!strcmp (exec_direction, exec_forward))
6905 execution_direction = EXEC_FORWARD;
6906 else if (!strcmp (exec_direction, exec_reverse))
6907 execution_direction = EXEC_REVERSE;
6908 }
6909 else
6910 {
6911 exec_direction = exec_forward;
6912 error (_("Target does not support this operation."));
6913 }
6914 }
6915
6916 static void
6917 show_exec_direction_func (struct ui_file *out, int from_tty,
6918 struct cmd_list_element *cmd, const char *value)
6919 {
6920 switch (execution_direction) {
6921 case EXEC_FORWARD:
6922 fprintf_filtered (out, _("Forward.\n"));
6923 break;
6924 case EXEC_REVERSE:
6925 fprintf_filtered (out, _("Reverse.\n"));
6926 break;
6927 default:
6928 internal_error (__FILE__, __LINE__,
6929 _("bogus execution_direction value: %d"),
6930 (int) execution_direction);
6931 }
6932 }
6933
6934 /* User interface for non-stop mode. */
6935
6936 int non_stop = 0;
6937
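/* The "set non-stop" command machinery stores the user's requested
   value in the staging variable non_stop_1 (wired up via
   add_setshow_boolean_cmd in _initialize_infrun below); set_non_stop
   then either commits it to non_stop, or, if an inferior is already
   live, rolls it back and reports an error.  */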
6938 static void
6939 set_non_stop (char *args, int from_tty,
6940 struct cmd_list_element *c)
6941 {
6942 if (target_has_execution)
6943 {
6944 non_stop_1 = non_stop;
6945 error (_("Cannot change this setting while the inferior is running."));
6946 }
6947
6948 non_stop = non_stop_1;
6949 }
6950
6951 static void
6952 show_non_stop (struct ui_file *file, int from_tty,
6953 struct cmd_list_element *c, const char *value)
6954 {
6955 fprintf_filtered (file,
6956 _("Controlling the inferior in non-stop mode is %s.\n"),
6957 value);
6958 }
6959
6960 static void
6961 show_schedule_multiple (struct ui_file *file, int from_tty,
6962 struct cmd_list_element *c, const char *value)
6963 {
6964 fprintf_filtered (file, _("Resuming the execution of threads "
6965 "of all processes is %s.\n"), value);
6966 }
6967
6968 void
6969 _initialize_infrun (void)
6970 {
6971 int i;
6972 int numsigs;
6973
6974 add_info ("signals", signals_info, _("\
6975 What debugger does when program gets various signals.\n\
6976 Specify a signal as argument to print info on that signal only."));
6977 add_info_alias ("handle", "signals", 0);
6978
6979 add_com ("handle", class_run, handle_command, _("\
6980 Specify how to handle a signal.\n\
6981 Args are signals and actions to apply to those signals.\n\
6982 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6983 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6984 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6985 The special arg \"all\" is recognized to mean all signals except those\n\
6986 used by the debugger, typically SIGTRAP and SIGINT.\n\
6987 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6988 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6989 Stop means reenter debugger if this signal happens (implies print).\n\
6990 Print means print a message if this signal happens.\n\
6991 Pass means let program see this signal; otherwise program doesn't know.\n\
6992 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6993 Pass and Stop may be combined."));
6994 if (xdb_commands)
6995 {
6996 add_com ("lz", class_info, signals_info, _("\
6997 What debugger does when program gets various signals.\n\
6998 Specify a signal as argument to print info on that signal only."));
6999 add_com ("z", class_run, xdb_handle_command, _("\
7000 Specify how to handle a signal.\n\
7001 Args are signals and actions to apply to those signals.\n\
7002 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7003 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7004 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7005 The special arg \"all\" is recognized to mean all signals except those\n\
7006 used by the debugger, typically SIGTRAP and SIGINT.\n\
7007 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7008 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7009 nopass), \"Q\" (noprint)\n\
7010 Stop means reenter debugger if this signal happens (implies print).\n\
7011 Print means print a message if this signal happens.\n\
7012 Pass means let program see this signal; otherwise program doesn't know.\n\
7013 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7014 Pass and Stop may be combined."));
7015 }
7016
7017 if (!dbx_commands)
7018 stop_command = add_cmd ("stop", class_obscure,
7019 not_just_help_class_command, _("\
7020 There is no `stop' command, but you can set a hook on `stop'.\n\
7021 This allows you to set a list of commands to be run each time execution\n\
7022 of the program stops."), &cmdlist);
7023
7024 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7025 Set inferior debugging."), _("\
7026 Show inferior debugging."), _("\
7027 When non-zero, inferior specific debugging is enabled."),
7028 NULL,
7029 show_debug_infrun,
7030 &setdebuglist, &showdebuglist);
7031
7032 add_setshow_boolean_cmd ("displaced", class_maintenance,
7033 &debug_displaced, _("\
7034 Set displaced stepping debugging."), _("\
7035 Show displaced stepping debugging."), _("\
7036 When non-zero, displaced stepping specific debugging is enabled."),
7037 NULL,
7038 show_debug_displaced,
7039 &setdebuglist, &showdebuglist);
7040
7041 add_setshow_boolean_cmd ("non-stop", no_class,
7042 &non_stop_1, _("\
7043 Set whether gdb controls the inferior in non-stop mode."), _("\
7044 Show whether gdb controls the inferior in non-stop mode."), _("\
7045 When debugging a multi-threaded program and this setting is\n\
7046 off (the default, also called all-stop mode), when one thread stops\n\
7047 (for a breakpoint, watchpoint, exception, or similar event), GDB stops\n\
7048 all other threads in the program while you interact with the thread of\n\
7049 interest. When you continue or step a thread, you can allow the other\n\
7050 threads to run, or have them remain stopped, but while you inspect any\n\
7051 thread's state, all threads stop.\n\
7052 \n\
7053 In non-stop mode, when one thread stops, other threads can continue\n\
7054 to run freely. You'll be able to step each thread independently,\n\
7055 leave it stopped or free to run as needed."),
7056 set_non_stop,
7057 show_non_stop,
7058 &setlist,
7059 &showlist);
7060
7061 numsigs = (int) TARGET_SIGNAL_LAST;
7062 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7063 signal_print = (unsigned char *)
7064 xmalloc (sizeof (signal_print[0]) * numsigs);
7065 signal_program = (unsigned char *)
7066 xmalloc (sizeof (signal_program[0]) * numsigs);
7067 signal_pass = (unsigned char *)
7068 xmalloc (sizeof (signal_pass[0]) * numsigs);
7069 for (i = 0; i < numsigs; i++)
7070 {
7071 signal_stop[i] = 1;
7072 signal_print[i] = 1;
7073 signal_program[i] = 1;
7074 }
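
/* Note that signal_pass is not seeded by the loop above; it is filled
   in by the signal_cache_update (-1) call below.  */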
7075
7076 /* Signals caused by debugger's own actions
7077 should not be given to the program afterwards. */
7078 signal_program[TARGET_SIGNAL_TRAP] = 0;
7079 signal_program[TARGET_SIGNAL_INT] = 0;
7080
7081 /* Signals that are not errors should not normally enter the debugger. */
7082 signal_stop[TARGET_SIGNAL_ALRM] = 0;
7083 signal_print[TARGET_SIGNAL_ALRM] = 0;
7084 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
7085 signal_print[TARGET_SIGNAL_VTALRM] = 0;
7086 signal_stop[TARGET_SIGNAL_PROF] = 0;
7087 signal_print[TARGET_SIGNAL_PROF] = 0;
7088 signal_stop[TARGET_SIGNAL_CHLD] = 0;
7089 signal_print[TARGET_SIGNAL_CHLD] = 0;
7090 signal_stop[TARGET_SIGNAL_IO] = 0;
7091 signal_print[TARGET_SIGNAL_IO] = 0;
7092 signal_stop[TARGET_SIGNAL_POLL] = 0;
7093 signal_print[TARGET_SIGNAL_POLL] = 0;
7094 signal_stop[TARGET_SIGNAL_URG] = 0;
7095 signal_print[TARGET_SIGNAL_URG] = 0;
7096 signal_stop[TARGET_SIGNAL_WINCH] = 0;
7097 signal_print[TARGET_SIGNAL_WINCH] = 0;
7098 signal_stop[TARGET_SIGNAL_PRIO] = 0;
7099 signal_print[TARGET_SIGNAL_PRIO] = 0;
7100
7101 /* These signals are used internally by user-level thread
7102 implementations. (See signal(5) on Solaris.) Like the above
7103 signals, a healthy program receives and handles them as part of
7104 its normal operation. */
7105 signal_stop[TARGET_SIGNAL_LWP] = 0;
7106 signal_print[TARGET_SIGNAL_LWP] = 0;
7107 signal_stop[TARGET_SIGNAL_WAITING] = 0;
7108 signal_print[TARGET_SIGNAL_WAITING] = 0;
7109 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
7110 signal_print[TARGET_SIGNAL_CANCEL] = 0;
7111
7112 /* Update cached state. */
7113 signal_cache_update (-1);
7114
7115 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7116 &stop_on_solib_events, _("\
7117 Set stopping for shared library events."), _("\
7118 Show stopping for shared library events."), _("\
7119 If nonzero, gdb will give control to the user when the dynamic linker\n\
7120 notifies gdb of shared library events. The most common event of interest\n\
7121 to the user would be loading/unloading of a new library."),
7122 NULL,
7123 show_stop_on_solib_events,
7124 &setlist, &showlist);
7125
7126 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7127 follow_fork_mode_kind_names,
7128 &follow_fork_mode_string, _("\
7129 Set debugger response to a program call of fork or vfork."), _("\
7130 Show debugger response to a program call of fork or vfork."), _("\
7131 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7132 parent - the original process is debugged after a fork\n\
7133 child - the new process is debugged after a fork\n\
7134 The unfollowed process will continue to run.\n\
7135 By default, the debugger will follow the parent process."),
7136 NULL,
7137 show_follow_fork_mode_string,
7138 &setlist, &showlist);
7139
7140 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7141 follow_exec_mode_names,
7142 &follow_exec_mode_string, _("\
7143 Set debugger response to a program call of exec."), _("\
7144 Show debugger response to a program call of exec."), _("\
7145 An exec call replaces the program image of a process.\n\
7146 \n\
7147 follow-exec-mode can be:\n\
7148 \n\
7149 new - the debugger creates a new inferior and rebinds the process\n\
7150 to this new inferior. The program the process was running before\n\
7151 the exec call can be restarted afterwards by restarting the original\n\
7152 inferior.\n\
7153 \n\
7154 same - the debugger keeps the process bound to the same inferior.\n\
7155 The new executable image replaces the previous executable loaded in\n\
7156 the inferior. Restarting the inferior after the exec call restarts\n\
7157 the executable the process was running after the exec call.\n\
7158 \n\
7159 By default, the debugger will use the same inferior."),
7160 NULL,
7161 show_follow_exec_mode_string,
7162 &setlist, &showlist);
7163
7164 add_setshow_enum_cmd ("scheduler-locking", class_run,
7165 scheduler_enums, &scheduler_mode, _("\
7166 Set mode for locking scheduler during execution."), _("\
7167 Show mode for locking scheduler during execution."), _("\
7168 off == no locking (threads may preempt at any time)\n\
7169 on == full locking (no thread except the current thread may run)\n\
7170 step == scheduler locked during every single-step operation.\n\
7171 In this mode, no other thread may run during a step command.\n\
7172 Other threads may run while stepping over a function call ('next')."),
7173 set_schedlock_func, /* traps on target vector */
7174 show_scheduler_mode,
7175 &setlist, &showlist);
7176
7177 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7178 Set mode for resuming threads of all processes."), _("\
7179 Show mode for resuming threads of all processes."), _("\
7180 When on, execution commands (such as 'continue' or 'next') resume all\n\
7181 threads of all processes. When off (which is the default), execution\n\
7182 commands only resume the threads of the current process. The set of\n\
7183 threads that are resumed is further refined by the scheduler-locking\n\
7184 mode (see help set scheduler-locking)."),
7185 NULL,
7186 show_schedule_multiple,
7187 &setlist, &showlist);
7188
7189 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7190 Set mode of the step operation."), _("\
7191 Show mode of the step operation."), _("\
7192 When set, doing a step over a function without debug line information\n\
7193 will stop at the first instruction of that function. Otherwise, the\n\
7194 function is skipped and the step command stops at a different source line."),
7195 NULL,
7196 show_step_stop_if_no_debug,
7197 &setlist, &showlist);
7198
7199 add_setshow_enum_cmd ("displaced-stepping", class_run,
7200 can_use_displaced_stepping_enum,
7201 &can_use_displaced_stepping, _("\
7202 Set debugger's willingness to use displaced stepping."), _("\
7203 Show debugger's willingness to use displaced stepping."), _("\
7204 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7205 supported by the target architecture. If off, gdb will not use displaced\n\
7206 stepping to step over breakpoints, even if such is supported by the target\n\
7207 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7208 if the target architecture supports it and non-stop mode is active, but will not\n\
7209 use it in all-stop mode (see help set non-stop)."),
7210 NULL,
7211 show_can_use_displaced_stepping,
7212 &setlist, &showlist);
7213
7214 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7215 &exec_direction, _("Set direction of execution.\n\
7216 Options are 'forward' or 'reverse'."),
7217 _("Show direction of execution (forward/reverse)."),
7218 _("Tells gdb whether to execute forward or backward."),
7219 set_exec_direction_func, show_exec_direction_func,
7220 &setlist, &showlist);
7221
7222 /* Set/show detach-on-fork: user-settable mode. */
7223
7224 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7225 Set whether gdb will detach the child of a fork."), _("\
7226 Show whether gdb will detach the child of a fork."), _("\
7227 Tells gdb whether to detach the child of a fork."),
7228 NULL, NULL, &setlist, &showlist);
7229
7230 /* ptid initializations */
7231 null_ptid = ptid_build (0, 0, 0);
7232 minus_one_ptid = ptid_build (-1, 0, 0);
7233 inferior_ptid = null_ptid;
7234 target_last_wait_ptid = minus_one_ptid;
7235
7236 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7237 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7238 observer_attach_thread_exit (infrun_thread_thread_exit);
7239 observer_attach_inferior_exit (infrun_inferior_exit);
7240
7241 /* Explicitly create without lookup, since that tries to create a
7242 value with a void typed value, and when we get here, gdbarch
7243 isn't initialized yet. At this point, we're quite sure there
7244 isn't another convenience variable of the same name. */
7245 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
7246
7247 add_setshow_boolean_cmd ("observer", no_class,
7248 &observer_mode_1, _("\
7249 Set whether gdb controls the inferior in observer mode."), _("\
7250 Show whether gdb controls the inferior in observer mode."), _("\
7251 In observer mode, GDB can get data from the inferior, but not\n\
7252 affect its execution. Registers and memory may not be changed,\n\
7253 breakpoints may not be set, and the program cannot be interrupted\n\
7254 or signalled."),
7255 set_observer_mode,
7256 show_observer_mode,
7257 &setlist,
7258 &showlist);
7259 }