1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "dictionary.h"
49 #include "block.h"
50 #include "gdb_assert.h"
51 #include "mi/mi-common.h"
52 #include "event-top.h"
53 #include "record.h"
54 #include "inline-frame.h"
55 #include "jit.h"
56 #include "tracepoint.h"
57 #include "continuations.h"
58
59 /* Prototypes for local functions */
60
61 static void signals_info (char *, int);
62
63 static void handle_command (char *, int);
64
65 static void sig_print_info (enum target_signal);
66
67 static void sig_print_header (void);
68
69 static void resume_cleanups (void *);
70
71 static int hook_stop_stub (void *);
72
73 static int restore_selected_frame (void *);
74
75 static int follow_fork (void);
76
77 static void set_schedlock_func (char *args, int from_tty,
78 struct cmd_list_element *c);
79
80 static int currently_stepping (struct thread_info *tp);
81
82 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
83 void *data);
84
85 static void xdb_handle_command (char *args, int from_tty);
86
87 static int prepare_to_proceed (int);
88
89 static void print_exited_reason (int exitstatus);
90
91 static void print_signal_exited_reason (enum target_signal siggnal);
92
93 static void print_no_history_reason (void);
94
95 static void print_signal_received_reason (enum target_signal siggnal);
96
97 static void print_end_stepping_range_reason (void);
98
99 void _initialize_infrun (void);
100
101 void nullify_last_target_wait_ptid (void);
102
103 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
104
105 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
106
107 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
108
109 /* When set, stop the 'step' command if we enter a function which has
110 no line number information. The normal behavior is that we step
111 over such functions. */
112 int step_stop_if_no_debug = 0;
113 static void
114 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
115 struct cmd_list_element *c, const char *value)
116 {
117 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
118 }
119
120 /* In asynchronous mode, but simulating synchronous execution. */
121
122 int sync_execution = 0;
123
124 /* wait_for_inferior and normal_stop use this to notify the user
125 when the inferior stopped in a different thread than it had been
126 running in. */
127
128 static ptid_t previous_inferior_ptid;
129
130 /* Default behavior is to detach newly forked processes (legacy). */
131 int detach_fork = 1;
132
133 int debug_displaced = 0;
134 static void
135 show_debug_displaced (struct ui_file *file, int from_tty,
136 struct cmd_list_element *c, const char *value)
137 {
138 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
139 }
140
141 int debug_infrun = 0;
142 static void
143 show_debug_infrun (struct ui_file *file, int from_tty,
144 struct cmd_list_element *c, const char *value)
145 {
146 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
147 }
148
149 /* If the program uses ELF-style shared libraries, then calls to
150 functions in shared libraries go through stubs, which live in a
151 table called the PLT (Procedure Linkage Table). The first time the
152 function is called, the stub sends control to the dynamic linker,
153 which looks up the function's real address, patches the stub so
154 that future calls will go directly to the function, and then passes
155 control to the function.
156
157 If we are stepping at the source level, we don't want to see any of
158 this --- we just want to skip over the stub and the dynamic linker.
159 The simple approach is to single-step until control leaves the
160 dynamic linker.
161
162 However, on some systems (e.g., Red Hat's 5.2 distribution) the
163 dynamic linker calls functions in the shared C library, so you
164 can't tell from the PC alone whether the dynamic linker is still
165 running. In this case, we use a step-resume breakpoint to get us
166 past the dynamic linker, as if we were using "next" to step over a
167 function call.
168
169 in_solib_dynsym_resolve_code() says whether we're in the dynamic
170 linker code or not. Normally, this means we single-step. However,
171 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
172 address where we can place a step-resume breakpoint to get past the
173 linker's symbol resolution function.
174
175 in_solib_dynsym_resolve_code() can generally be implemented in a
176 pretty portable way, by comparing the PC against the address ranges
177 of the dynamic linker's sections.
178
179 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
180 it depends on internal details of the dynamic linker. It's usually
181 not too hard to figure out where to put a breakpoint, but it
182 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
183 sanity checking. If it can't figure things out, returning zero and
184 getting the (possibly confusing) stepping behavior is better than
185 signalling an error, which will obscure the change in the
186 inferior's state. */
187
188 /* This function returns TRUE if pc is the address of an instruction
189 that lies within the dynamic linker (such as the event hook, or the
190 dld itself).
191
192 This function must be used only when a dynamic linker event has
193 been caught, and the inferior is being stepped out of the hook, or
194 undefined results are guaranteed. */
195
196 #ifndef SOLIB_IN_DYNAMIC_LINKER
197 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
198 #endif
199
200 /* "Observer mode" is somewhat like a more extreme version of
201 non-stop, in which all GDB operations that might affect the
202 target's execution have been disabled. */
203
204 static int non_stop_1 = 0;
205
206 int observer_mode = 0;
207 static int observer_mode_1 = 0;
208
209 static void
210 set_observer_mode (char *args, int from_tty,
211 struct cmd_list_element *c)
212 {
213 extern int pagination_enabled;
214
215 if (target_has_execution)
216 {
217 observer_mode_1 = observer_mode;
218 error (_("Cannot change this setting while the inferior is running."));
219 }
220
221 observer_mode = observer_mode_1;
222
223 may_write_registers = !observer_mode;
224 may_write_memory = !observer_mode;
225 may_insert_breakpoints = !observer_mode;
226 may_insert_tracepoints = !observer_mode;
227 /* We can insert fast tracepoints in or out of observer mode,
228 but enable them if we're going into this mode. */
229 if (observer_mode)
230 may_insert_fast_tracepoints = 1;
231 may_stop = !observer_mode;
232 update_target_permissions ();
233
234 /* Going *into* observer mode we must force non-stop, then
235 going out we leave it that way. */
236 if (observer_mode)
237 {
238 target_async_permitted = 1;
239 pagination_enabled = 0;
240 non_stop = non_stop_1 = 1;
241 }
242
243 if (from_tty)
244 printf_filtered (_("Observer mode is now %s.\n"),
245 (observer_mode ? "on" : "off"));
246 }
247
248 static void
249 show_observer_mode (struct ui_file *file, int from_tty,
250 struct cmd_list_element *c, const char *value)
251 {
252 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
253 }
254
255 /* This updates the value of observer mode based on changes in
256 permissions. Note that we are deliberately ignoring the values of
257 may-write-registers and may-write-memory, since the user may have
258 reason to enable these during a session, for instance to turn on a
259 debugging-related global. */
260
261 void
262 update_observer_mode (void)
263 {
264 int newval;
265
266 newval = (!may_insert_breakpoints
267 && !may_insert_tracepoints
268 && may_insert_fast_tracepoints
269 && !may_stop
270 && non_stop);
271
272 /* Let the user know if things change. */
273 if (newval != observer_mode)
274 printf_filtered (_("Observer mode is now %s.\n"),
275 (newval ? "on" : "off"));
276
277 observer_mode = observer_mode_1 = newval;
278 }
279
280 /* Tables of how to react to signals; the user sets them. */
281
282 static unsigned char *signal_stop;
283 static unsigned char *signal_print;
284 static unsigned char *signal_program;
285
286 /* Table of signals that the target may silently handle.
287 This is automatically determined from the flags above,
288 and simply cached here. */
289 static unsigned char *signal_pass;
290
291 #define SET_SIGS(nsigs,sigs,flags) \
292 do { \
293 int signum = (nsigs); \
294 while (signum-- > 0) \
295 if ((sigs)[signum]) \
296 (flags)[signum] = 1; \
297 } while (0)
298
299 #define UNSET_SIGS(nsigs,sigs,flags) \
300 do { \
301 int signum = (nsigs); \
302 while (signum-- > 0) \
303 if ((sigs)[signum]) \
304 (flags)[signum] = 0; \
305 } while (0)
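/* For illustration, handle_command (declared above) is expected to drive
   these macros roughly like so, given a user-selected signal set SIGS of
   NSIGS entries:

     SET_SIGS (nsigs, sigs, signal_stop);
     SET_SIGS (nsigs, sigs, signal_print);     -- stopping implies printing

   while a "nopass"-style request would clear the corresponding flags:

     UNSET_SIGS (nsigs, sigs, signal_program);

   The exact combinations live in handle_command itself; this is only a
   rough sketch of how the flag tables are flipped.  */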
306
307 /* Value to pass to target_resume() to cause all threads to resume. */
308
309 #define RESUME_ALL minus_one_ptid
310
311 /* Command list pointer for the "stop" placeholder. */
312
313 static struct cmd_list_element *stop_command;
314
315 /* Function inferior was in as of last step command. */
316
317 static struct symbol *step_start_function;
318
319 /* Nonzero if we want to give control to the user when we're notified
320 of shared library events by the dynamic linker. */
321 int stop_on_solib_events;
322 static void
323 show_stop_on_solib_events (struct ui_file *file, int from_tty,
324 struct cmd_list_element *c, const char *value)
325 {
326 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
327 value);
328 }
329
330 /* Nonzero means we are expecting a trace trap
331 and should stop the inferior and return silently when it happens. */
332
333 int stop_after_trap;
334
335 /* Save register contents here when executing a "finish" command or when
336 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
337 Thus this contains the return value from the called function (assuming
338 values are returned in a register). */
339
340 struct regcache *stop_registers;
341
342 /* Nonzero after stop if current stack frame should be printed. */
343
344 static int stop_print_frame;
345
346 /* This is a cached copy of the pid/waitstatus of the last event
347 returned by target_wait()/deprecated_target_wait_hook(). This
348 information is returned by get_last_target_status(). */
349 static ptid_t target_last_wait_ptid;
350 static struct target_waitstatus target_last_waitstatus;
351
352 static void context_switch (ptid_t ptid);
353
354 void init_thread_stepping_state (struct thread_info *tss);
355
356 void init_infwait_state (void);
357
358 static const char follow_fork_mode_child[] = "child";
359 static const char follow_fork_mode_parent[] = "parent";
360
361 static const char *follow_fork_mode_kind_names[] = {
362 follow_fork_mode_child,
363 follow_fork_mode_parent,
364 NULL
365 };
366
367 static const char *follow_fork_mode_string = follow_fork_mode_parent;
368 static void
369 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
370 struct cmd_list_element *c, const char *value)
371 {
372 fprintf_filtered (file,
373 _("Debugger response to a program "
374 "call of fork or vfork is \"%s\".\n"),
375 value);
376 }
377 \f
378
379 /* Tell the target to follow the fork we're stopped at. Returns true
380 if the inferior should be resumed; false, if the target for some
381 reason decided it's best not to resume. */
382
383 static int
384 follow_fork (void)
385 {
386 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
387 int should_resume = 1;
388 struct thread_info *tp;
389
390 /* Copy user stepping state to the new inferior thread. FIXME: the
391 followed fork child thread should have a copy of most of the
392 parent thread structure's run control related fields, not just these.
393 Initialized to avoid "may be used uninitialized" warnings from gcc. */
394 struct breakpoint *step_resume_breakpoint = NULL;
395 struct breakpoint *exception_resume_breakpoint = NULL;
396 CORE_ADDR step_range_start = 0;
397 CORE_ADDR step_range_end = 0;
398 struct frame_id step_frame_id = { 0 };
399
400 if (!non_stop)
401 {
402 ptid_t wait_ptid;
403 struct target_waitstatus wait_status;
404
405 /* Get the last target status returned by target_wait(). */
406 get_last_target_status (&wait_ptid, &wait_status);
407
408 /* If not stopped at a fork event, then there's nothing else to
409 do. */
410 if (wait_status.kind != TARGET_WAITKIND_FORKED
411 && wait_status.kind != TARGET_WAITKIND_VFORKED)
412 return 1;
413
414 /* Check if we switched over from WAIT_PTID, since the event was
415 reported. */
416 if (!ptid_equal (wait_ptid, minus_one_ptid)
417 && !ptid_equal (inferior_ptid, wait_ptid))
418 {
419 /* We did. Switch back to WAIT_PTID thread, to tell the
420 target to follow it (in either direction). We'll
421 afterwards refuse to resume, and inform the user what
422 happened. */
423 switch_to_thread (wait_ptid);
424 should_resume = 0;
425 }
426 }
427
428 tp = inferior_thread ();
429
430 /* If there were any forks/vforks that were caught and are now to be
431 followed, then do so now. */
432 switch (tp->pending_follow.kind)
433 {
434 case TARGET_WAITKIND_FORKED:
435 case TARGET_WAITKIND_VFORKED:
436 {
437 ptid_t parent, child;
438
439 /* If the user did a next/step, etc, over a fork call,
440 preserve the stepping state in the fork child. */
441 if (follow_child && should_resume)
442 {
443 step_resume_breakpoint = clone_momentary_breakpoint
444 (tp->control.step_resume_breakpoint);
445 step_range_start = tp->control.step_range_start;
446 step_range_end = tp->control.step_range_end;
447 step_frame_id = tp->control.step_frame_id;
448 exception_resume_breakpoint
449 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
450
451 /* For now, delete the parent's sr breakpoint, otherwise,
452 parent/child sr breakpoints are considered duplicates,
453 and the child version will not be installed. Remove
454 this when the breakpoints module becomes aware of
455 inferiors and address spaces. */
456 delete_step_resume_breakpoint (tp);
457 tp->control.step_range_start = 0;
458 tp->control.step_range_end = 0;
459 tp->control.step_frame_id = null_frame_id;
460 delete_exception_resume_breakpoint (tp);
461 }
462
463 parent = inferior_ptid;
464 child = tp->pending_follow.value.related_pid;
465
466 /* Tell the target to do whatever is necessary to follow
467 either parent or child. */
468 if (target_follow_fork (follow_child))
469 {
470 /* Target refused to follow, or there's some other reason
471 we shouldn't resume. */
472 should_resume = 0;
473 }
474 else
475 {
476 /* This pending follow fork event is now handled, one way
477 or another. The previous selected thread may be gone
478 from the lists by now, but if it is still around, we need
479 to clear the pending follow request. */
480 tp = find_thread_ptid (parent);
481 if (tp)
482 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
483
484 /* This makes sure we don't try to apply the "Switched
485 over from WAIT_PID" logic above. */
486 nullify_last_target_wait_ptid ();
487
488 /* If we followed the child, switch to it... */
489 if (follow_child)
490 {
491 switch_to_thread (child);
492
493 /* ... and preserve the stepping state, in case the
494 user was stepping over the fork call. */
495 if (should_resume)
496 {
497 tp = inferior_thread ();
498 tp->control.step_resume_breakpoint
499 = step_resume_breakpoint;
500 tp->control.step_range_start = step_range_start;
501 tp->control.step_range_end = step_range_end;
502 tp->control.step_frame_id = step_frame_id;
503 tp->control.exception_resume_breakpoint
504 = exception_resume_breakpoint;
505 }
506 else
507 {
508 /* If we get here, it was because we're trying to
509 resume from a fork catchpoint, but, the user
510 has switched threads away from the thread that
511 forked. In that case, the resume command
512 issued is most likely not applicable to the
513 child, so just warn, and refuse to resume. */
514 warning (_("Not resuming: switched threads "
515 "before following fork child.\n"));
516 }
517
518 /* Reset breakpoints in the child as appropriate. */
519 follow_inferior_reset_breakpoints ();
520 }
521 else
522 switch_to_thread (parent);
523 }
524 }
525 break;
526 case TARGET_WAITKIND_SPURIOUS:
527 /* Nothing to follow. */
528 break;
529 default:
530 internal_error (__FILE__, __LINE__,
531 "Unexpected pending_follow.kind %d\n",
532 tp->pending_follow.kind);
533 break;
534 }
535
536 return should_resume;
537 }
538
539 void
540 follow_inferior_reset_breakpoints (void)
541 {
542 struct thread_info *tp = inferior_thread ();
543
544 /* Was there a step_resume breakpoint? (There was if the user
545 did a "next" at the fork() call.) If so, explicitly reset its
546 thread number.
547
548 step_resumes are a form of bp that are made to be per-thread.
549 Since we created the step_resume bp when the parent process
550 was being debugged, and now are switching to the child process,
551 from the breakpoint package's viewpoint, that's a switch of
552 "threads". We must update the bp's notion of which thread
553 it is for, or it'll be ignored when it triggers. */
554
555 if (tp->control.step_resume_breakpoint)
556 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
557
558 if (tp->control.exception_resume_breakpoint)
559 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
560
561 /* Reinsert all breakpoints in the child. The user may have set
562 breakpoints after catching the fork, in which case those
563 were never set in the child, but only in the parent. This makes
564 sure the inserted breakpoints match the breakpoint list. */
565
566 breakpoint_re_set ();
567 insert_breakpoints ();
568 }
569
570 /* The child has exited or execed: resume threads of the parent the
571 user wanted to be executing. */
572
573 static int
574 proceed_after_vfork_done (struct thread_info *thread,
575 void *arg)
576 {
577 int pid = * (int *) arg;
578
579 if (ptid_get_pid (thread->ptid) == pid
580 && is_running (thread->ptid)
581 && !is_executing (thread->ptid)
582 && !thread->stop_requested
583 && thread->suspend.stop_signal == TARGET_SIGNAL_0)
584 {
585 if (debug_infrun)
586 fprintf_unfiltered (gdb_stdlog,
587 "infrun: resuming vfork parent thread %s\n",
588 target_pid_to_str (thread->ptid));
589
590 switch_to_thread (thread->ptid);
591 clear_proceed_status ();
592 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
593 }
594
595 return 0;
596 }
597
598 /* Called whenever we notice an exec or exit event, to handle
599 detaching or resuming a vfork parent. */
600
601 static void
602 handle_vfork_child_exec_or_exit (int exec)
603 {
604 struct inferior *inf = current_inferior ();
605
606 if (inf->vfork_parent)
607 {
608 int resume_parent = -1;
609
610 /* This exec or exit marks the end of the shared memory region
611 between the parent and the child. If the user wanted to
612 detach from the parent, now is the time. */
613
614 if (inf->vfork_parent->pending_detach)
615 {
616 struct thread_info *tp;
617 struct cleanup *old_chain;
618 struct program_space *pspace;
619 struct address_space *aspace;
620
621 /* follow-fork child, detach-on-fork on. */
622
623 old_chain = make_cleanup_restore_current_thread ();
624
625 /* We're letting go of the parent. */
626 tp = any_live_thread_of_process (inf->vfork_parent->pid);
627 switch_to_thread (tp->ptid);
628
629 /* We're about to detach from the parent, which implicitly
630 removes breakpoints from its address space. There's a
631 catch here: we want to reuse the spaces for the child,
632 but, parent/child are still sharing the pspace at this
633 point, although the exec in reality makes the kernel give
634 the child a fresh set of new pages. The problem here is
635 that the breakpoints module, being unaware of this, would
636 likely choose the child process to write to the parent
637 address space. Swapping the child temporarily away from
638 the spaces has the desired effect. Yes, this is "sort
639 of" a hack. */
640
641 pspace = inf->pspace;
642 aspace = inf->aspace;
643 inf->aspace = NULL;
644 inf->pspace = NULL;
645
646 if (debug_infrun || info_verbose)
647 {
648 target_terminal_ours ();
649
650 if (exec)
651 fprintf_filtered (gdb_stdlog,
652 "Detaching vfork parent process "
653 "%d after child exec.\n",
654 inf->vfork_parent->pid);
655 else
656 fprintf_filtered (gdb_stdlog,
657 "Detaching vfork parent process "
658 "%d after child exit.\n",
659 inf->vfork_parent->pid);
660 }
661
662 target_detach (NULL, 0);
663
664 /* Put it back. */
665 inf->pspace = pspace;
666 inf->aspace = aspace;
667
668 do_cleanups (old_chain);
669 }
670 else if (exec)
671 {
672 /* We're staying attached to the parent, so, really give the
673 child a new address space. */
674 inf->pspace = add_program_space (maybe_new_address_space ());
675 inf->aspace = inf->pspace->aspace;
676 inf->removable = 1;
677 set_current_program_space (inf->pspace);
678
679 resume_parent = inf->vfork_parent->pid;
680
681 /* Break the bonds. */
682 inf->vfork_parent->vfork_child = NULL;
683 }
684 else
685 {
686 struct cleanup *old_chain;
687 struct program_space *pspace;
688
689 /* If this is a vfork child exiting, then the pspace and
690 aspaces were shared with the parent. Since we're
691 reporting the process exit, we'll be mourning all that is
692 found in the address space, and switching to null_ptid,
693 preparing to start a new inferior. But, since we don't
694 want to clobber the parent's address/program spaces, we
695 go ahead and create a new one for this exiting
696 inferior. */
697
698 /* Switch to null_ptid, so that clone_program_space doesn't want
699 to read the selected frame of a dead process. */
700 old_chain = save_inferior_ptid ();
701 inferior_ptid = null_ptid;
702
703 /* This inferior is dead, so avoid giving the breakpoints
704 module the option to write through to it (cloning a
705 program space resets breakpoints). */
706 inf->aspace = NULL;
707 inf->pspace = NULL;
708 pspace = add_program_space (maybe_new_address_space ());
709 set_current_program_space (pspace);
710 inf->removable = 1;
711 clone_program_space (pspace, inf->vfork_parent->pspace);
712 inf->pspace = pspace;
713 inf->aspace = pspace->aspace;
714
715 /* Put back inferior_ptid. We'll continue mourning this
716 inferior. */
717 do_cleanups (old_chain);
718
719 resume_parent = inf->vfork_parent->pid;
720 /* Break the bonds. */
721 inf->vfork_parent->vfork_child = NULL;
722 }
723
724 inf->vfork_parent = NULL;
725
726 gdb_assert (current_program_space == inf->pspace);
727
728 if (non_stop && resume_parent != -1)
729 {
730 /* If the user wanted the parent to be running, let it go
731 free now. */
732 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
733
734 if (debug_infrun)
735 fprintf_unfiltered (gdb_stdlog,
736 "infrun: resuming vfork parent process %d\n",
737 resume_parent);
738
739 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
740
741 do_cleanups (old_chain);
742 }
743 }
744 }
745
746 /* Enum strings for "set|show follow-exec-mode". */
747
748 static const char follow_exec_mode_new[] = "new";
749 static const char follow_exec_mode_same[] = "same";
750 static const char *follow_exec_mode_names[] =
751 {
752 follow_exec_mode_new,
753 follow_exec_mode_same,
754 NULL,
755 };
756
757 static const char *follow_exec_mode_string = follow_exec_mode_same;
758 static void
759 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
760 struct cmd_list_element *c, const char *value)
761 {
762 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
763 }
764
765 /* EXECD_PATHNAME is assumed to be non-NULL. */
766
767 static void
768 follow_exec (ptid_t pid, char *execd_pathname)
769 {
770 struct thread_info *th = inferior_thread ();
771 struct inferior *inf = current_inferior ();
772
773 /* This is an exec event that we actually wish to pay attention to.
774 Refresh our symbol table to the newly exec'd program, remove any
775 momentary bp's, etc.
776
777 If there are breakpoints, they aren't really inserted now,
778 since the exec() transformed our inferior into a fresh set
779 of instructions.
780
781 We want to preserve symbolic breakpoints on the list, since
782 we have hopes that they can be reset after the new a.out's
783 symbol table is read.
784
785 However, any "raw" breakpoints must be removed from the list
786 (e.g., the solib bp's), since their address is probably invalid
787 now.
788
789 And, we DON'T want to call delete_breakpoints() here, since
790 that may write the bp's "shadow contents" (the instruction
791 value that was overwritten with a TRAP instruction). Since
792 we now have a new a.out, those shadow contents aren't valid. */
793
794 mark_breakpoints_out ();
795
796 update_breakpoints_after_exec ();
797
798 /* If there was one, it's gone now. We cannot truly step-to-next
799 statement through an exec(). */
800 th->control.step_resume_breakpoint = NULL;
801 th->control.exception_resume_breakpoint = NULL;
802 th->control.step_range_start = 0;
803 th->control.step_range_end = 0;
804
805 /* The target reports the exec event to the main thread, even if
806 some other thread does the exec, and even if the main thread was
807 already stopped --- if debugging in non-stop mode, it's possible
808 the user had the main thread held stopped in the previous image
809 --- release it now. This is the same behavior as step-over-exec
810 with scheduler-locking on in all-stop mode. */
811 th->stop_requested = 0;
812
813 /* What is this a.out's name? */
814 printf_unfiltered (_("%s is executing new program: %s\n"),
815 target_pid_to_str (inferior_ptid),
816 execd_pathname);
817
818 /* We've followed the inferior through an exec. Therefore, the
819 inferior has essentially been killed & reborn. */
820
821 gdb_flush (gdb_stdout);
822
823 breakpoint_init_inferior (inf_execd);
824
825 if (gdb_sysroot && *gdb_sysroot)
826 {
827 char *name = alloca (strlen (gdb_sysroot)
828 + strlen (execd_pathname)
829 + 1);
830
831 strcpy (name, gdb_sysroot);
832 strcat (name, execd_pathname);
833 execd_pathname = name;
834 }
835
836 /* Reset the shared library package. This ensures that we get a
837 shlib event when the child reaches "_start", at which point the
838 dld will have had a chance to initialize the child. */
839 /* Also, loading a symbol file below may trigger symbol lookups, and
840 we don't want those to be satisfied by the libraries of the
841 previous incarnation of this process. */
842 no_shared_libraries (NULL, 0);
843
844 if (follow_exec_mode_string == follow_exec_mode_new)
845 {
846 struct program_space *pspace;
847
848 /* The user wants to keep the old inferior and program spaces
849 around. Create a new fresh one, and switch to it. */
850
851 inf = add_inferior (current_inferior ()->pid);
852 pspace = add_program_space (maybe_new_address_space ());
853 inf->pspace = pspace;
854 inf->aspace = pspace->aspace;
855
856 exit_inferior_num_silent (current_inferior ()->num);
857
858 set_current_inferior (inf);
859 set_current_program_space (pspace);
860 }
861
862 gdb_assert (current_program_space == inf->pspace);
863
864 /* That a.out is now the one to use. */
865 exec_file_attach (execd_pathname, 0);
866
867 /* SYMFILE_DEFER_BP_RESET is used here because the proper displacement for
868 a PIE (Position Independent Executable) main symbol file will only get
869 applied by solib_create_inferior_hook below. breakpoint_re_set would fail
870 to insert the breakpoints with the zero displacement. */
871
872 symbol_file_add (execd_pathname, SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET,
873 NULL, 0);
874
875 set_initial_language ();
876
877 #ifdef SOLIB_CREATE_INFERIOR_HOOK
878 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
879 #else
880 solib_create_inferior_hook (0);
881 #endif
882
883 jit_inferior_created_hook ();
884
885 breakpoint_re_set ();
886
887 /* Reinsert all breakpoints. (Those which were symbolic have
888 been reset to the proper address in the new a.out, thanks
889 to symbol_file_command...). */
890 insert_breakpoints ();
891
892 /* The next resume of this inferior should bring it to the shlib
893 startup breakpoints. (If the user had also set bp's on
894 "main" from the old (parent) process, then they'll auto-
895 matically get reset there in the new process.). */
896 }
897
898 /* Non-zero if we are just simulating a single-step. This is needed
899 because we cannot remove the breakpoints in the inferior process
900 until after the `wait' in `wait_for_inferior'. */
901 static int singlestep_breakpoints_inserted_p = 0;
902
903 /* The thread we inserted single-step breakpoints for. */
904 static ptid_t singlestep_ptid;
905
906 /* PC when we started this single-step. */
907 static CORE_ADDR singlestep_pc;
908
909 /* If another thread hit the singlestep breakpoint, we save the original
910 thread here so that we can resume single-stepping it later. */
911 static ptid_t saved_singlestep_ptid;
912 static int stepping_past_singlestep_breakpoint;
913
914 /* If not equal to null_ptid, this means that once stepping over a breakpoint
915 is finished, we need to switch to deferred_step_ptid, and step it.
916
917 The use case is when one thread has hit a breakpoint, and then the user
918 has switched to another thread and issued 'step'. We need to step over
919 the breakpoint in the thread which hit the breakpoint, but then continue
920 stepping the thread user has selected. */
921 static ptid_t deferred_step_ptid;
922 \f
923 /* Displaced stepping. */
924
925 /* In non-stop debugging mode, we must take special care to manage
926 breakpoints properly; in particular, the traditional strategy for
927 stepping a thread past a breakpoint it has hit is unsuitable.
928 'Displaced stepping' is a tactic for stepping one thread past a
929 breakpoint it has hit while ensuring that other threads running
930 concurrently will hit the breakpoint as they should.
931
932 The traditional way to step a thread T off a breakpoint in a
933 multi-threaded program in all-stop mode is as follows:
934
935 a0) Initially, all threads are stopped, and breakpoints are not
936 inserted.
937 a1) We single-step T, leaving breakpoints uninserted.
938 a2) We insert breakpoints, and resume all threads.
939
940 In non-stop debugging, however, this strategy is unsuitable: we
941 don't want to have to stop all threads in the system in order to
942 continue or step T past a breakpoint. Instead, we use displaced
943 stepping:
944
945 n0) Initially, T is stopped, other threads are running, and
946 breakpoints are inserted.
947 n1) We copy the instruction "under" the breakpoint to a separate
948 location, outside the main code stream, making any adjustments
949 to the instruction, register, and memory state as directed by
950 T's architecture.
951 n2) We single-step T over the instruction at its new location.
952 n3) We adjust the resulting register and memory state as directed
953 by T's architecture. This includes resetting T's PC to point
954 back into the main instruction stream.
955 n4) We resume T.
956
957 This approach depends on the following gdbarch methods:
958
959 - gdbarch_max_insn_length and gdbarch_displaced_step_location
960 indicate where to copy the instruction, and how much space must
961 be reserved there. We use these in step n1.
962
963 - gdbarch_displaced_step_copy_insn copies an instruction to a new
964 address, and makes any necessary adjustments to the instruction,
965 register contents, and memory. We use this in step n1.
966
967 - gdbarch_displaced_step_fixup adjusts registers and memory after
968 we have successfully single-stepped the instruction, to yield the
969 same effect the instruction would have had if we had executed it
970 at its original address. We use this in step n3.
971
972 - gdbarch_displaced_step_free_closure provides cleanup.
973
974 The gdbarch_displaced_step_copy_insn and
975 gdbarch_displaced_step_fixup functions must be written so that
976 copying an instruction with gdbarch_displaced_step_copy_insn,
977 single-stepping across the copied instruction, and then applying
978 gdbarch_displaced_step_fixup should have the same effects on the
979 thread's memory and registers as stepping the instruction in place
980 would have. Exactly which responsibilities fall to the copy and
981 which fall to the fixup is up to the author of those functions.
982
983 See the comments in gdbarch.sh for details.
984
985 Note that displaced stepping and software single-step cannot
986 currently be used in combination, although with some care I think
987 they could be made to. Software single-step works by placing
988 breakpoints on all possible subsequent instructions; if the
989 displaced instruction is a PC-relative jump, those breakpoints
990 could fall in very strange places --- on pages that aren't
991 executable, or at addresses that are not proper instruction
992 boundaries. (We do generally let other threads run while we wait
993 to hit the software single-step breakpoint, and they might
994 encounter such a corrupted instruction.) One way to work around
995 this would be to have gdbarch_displaced_step_copy_insn fully
996 simulate the effect of PC-relative instructions (and return NULL)
997 on architectures that use software single-stepping.
998
999 In non-stop mode, we can have independent and simultaneous step
1000 requests, so more than one thread may need to simultaneously step
1001 over a breakpoint. The current implementation assumes there is
1002 only one scratch space per process. In this case, we have to
1003 serialize access to the scratch space. If thread A wants to step
1004 over a breakpoint, but we are currently waiting for some other
1005 thread to complete a displaced step, we leave thread A stopped and
1006 place it in the displaced_step_request_queue. Whenever a displaced
1007 step finishes, we pick the next thread in the queue and start a new
1008 displaced step operation on it. See displaced_step_prepare and
1009 displaced_step_fixup for details. */
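/* As a rough sketch, steps n1-n4 above map onto the helpers defined below
   approximately like so (the real sequencing lives in resume and in the
   event handling code):

     if (displaced_step_prepare (inferior_ptid))          -- n1: copy the insn
       target_resume (inferior_ptid, 1, TARGET_SIGNAL_0); -- n2: step the copy
     ...
     displaced_step_fixup (event_ptid, TARGET_SIGNAL_TRAP); -- n3: fix up state

   after which the thread can be resumed normally (n4), or the next queued
   request is started.  */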
1010
1011 struct displaced_step_request
1012 {
1013 ptid_t ptid;
1014 struct displaced_step_request *next;
1015 };
1016
1017 /* Per-inferior displaced stepping state. */
1018 struct displaced_step_inferior_state
1019 {
1020 /* Pointer to next in linked list. */
1021 struct displaced_step_inferior_state *next;
1022
1023 /* The process this displaced step state refers to. */
1024 int pid;
1025
1026 /* A queue of pending displaced stepping requests. One entry per
1027 thread that needs to do a displaced step. */
1028 struct displaced_step_request *step_request_queue;
1029
1030 /* If this is not null_ptid, this is the thread carrying out a
1031 displaced single-step in process PID. This thread's state will
1032 require fixing up once it has completed its step. */
1033 ptid_t step_ptid;
1034
1035 /* The architecture the thread had when we stepped it. */
1036 struct gdbarch *step_gdbarch;
1037
1038 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1039 for post-step cleanup. */
1040 struct displaced_step_closure *step_closure;
1041
1042 /* The address of the original instruction, and the copy we
1043 made. */
1044 CORE_ADDR step_original, step_copy;
1045
1046 /* Saved contents of copy area. */
1047 gdb_byte *step_saved_copy;
1048 };
1049
1050 /* The list of states of processes involved in displaced stepping
1051 presently. */
1052 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1053
1054 /* Get the displaced stepping state of process PID. */
1055
1056 static struct displaced_step_inferior_state *
1057 get_displaced_stepping_state (int pid)
1058 {
1059 struct displaced_step_inferior_state *state;
1060
1061 for (state = displaced_step_inferior_states;
1062 state != NULL;
1063 state = state->next)
1064 if (state->pid == pid)
1065 return state;
1066
1067 return NULL;
1068 }
1069
1070 /* Add a new displaced stepping state for process PID to the displaced
1071 stepping state list, or return a pointer to an already existing
1072 entry, if it already exists. Never returns NULL. */
1073
1074 static struct displaced_step_inferior_state *
1075 add_displaced_stepping_state (int pid)
1076 {
1077 struct displaced_step_inferior_state *state;
1078
1079 for (state = displaced_step_inferior_states;
1080 state != NULL;
1081 state = state->next)
1082 if (state->pid == pid)
1083 return state;
1084
1085 state = xcalloc (1, sizeof (*state));
1086 state->pid = pid;
1087 state->next = displaced_step_inferior_states;
1088 displaced_step_inferior_states = state;
1089
1090 return state;
1091 }
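/* As a rule of thumb in this file, code that is about to start a displaced
   step uses the lookup-or-create variant,

     displaced = add_displaced_stepping_state (ptid_get_pid (ptid));

   whereas code that merely reacts to an event uses the plain lookup,

     displaced = get_displaced_stepping_state (ptid_get_pid (event_ptid));

   and must be prepared for a NULL result, as displaced_step_fixup below is.  */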
1092
1093 /* If the inferior is doing a displaced step, and ADDR equals the starting
1094 address of the copy area, return the corresponding displaced_step_closure.
1095 Otherwise, return NULL. */
1096
1097 struct displaced_step_closure*
1098 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1099 {
1100 struct displaced_step_inferior_state *displaced
1101 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1102
1103 /* If checking the mode of displaced instruction in copy area. */
1104 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1105 && (displaced->step_copy == addr))
1106 return displaced->step_closure;
1107
1108 return NULL;
1109 }
1110
1111 /* Remove the displaced stepping state of process PID. */
1112
1113 static void
1114 remove_displaced_stepping_state (int pid)
1115 {
1116 struct displaced_step_inferior_state *it, **prev_next_p;
1117
1118 gdb_assert (pid != 0);
1119
1120 it = displaced_step_inferior_states;
1121 prev_next_p = &displaced_step_inferior_states;
1122 while (it)
1123 {
1124 if (it->pid == pid)
1125 {
1126 *prev_next_p = it->next;
1127 xfree (it);
1128 return;
1129 }
1130
1131 prev_next_p = &it->next;
1132 it = *prev_next_p;
1133 }
1134 }
1135
1136 static void
1137 infrun_inferior_exit (struct inferior *inf)
1138 {
1139 remove_displaced_stepping_state (inf->pid);
1140 }
1141
1142 /* Enum strings for "set|show displaced-stepping". */
1143
1144 static const char can_use_displaced_stepping_auto[] = "auto";
1145 static const char can_use_displaced_stepping_on[] = "on";
1146 static const char can_use_displaced_stepping_off[] = "off";
1147 static const char *can_use_displaced_stepping_enum[] =
1148 {
1149 can_use_displaced_stepping_auto,
1150 can_use_displaced_stepping_on,
1151 can_use_displaced_stepping_off,
1152 NULL,
1153 };
1154
1155 /* If ON, and the architecture supports it, GDB will use displaced
1156 stepping to step over breakpoints. If OFF, or if the architecture
1157 doesn't support it, GDB will instead use the traditional
1158 hold-and-step approach. If AUTO (which is the default), GDB will
1159 decide which technique to use to step over breakpoints depending on
1160 which of all-stop or non-stop mode is active --- displaced stepping
1161 in non-stop mode; hold-and-step in all-stop mode. */
1162
1163 static const char *can_use_displaced_stepping =
1164 can_use_displaced_stepping_auto;
1165
1166 static void
1167 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1168 struct cmd_list_element *c,
1169 const char *value)
1170 {
1171 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1172 fprintf_filtered (file,
1173 _("Debugger's willingness to use displaced stepping "
1174 "to step over breakpoints is %s (currently %s).\n"),
1175 value, non_stop ? "on" : "off");
1176 else
1177 fprintf_filtered (file,
1178 _("Debugger's willingness to use displaced stepping "
1179 "to step over breakpoints is %s.\n"), value);
1180 }
1181
1182 /* Return non-zero if displaced stepping can/should be used to step
1183 over breakpoints. */
1184
1185 static int
1186 use_displaced_stepping (struct gdbarch *gdbarch)
1187 {
1188 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1189 && non_stop)
1190 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1191 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1192 && !RECORD_IS_USED);
1193 }
1194
1195 /* Clean out any stray displaced stepping state. */
1196 static void
1197 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1198 {
1199 /* Indicate that there is no cleanup pending. */
1200 displaced->step_ptid = null_ptid;
1201
1202 if (displaced->step_closure)
1203 {
1204 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1205 displaced->step_closure);
1206 displaced->step_closure = NULL;
1207 }
1208 }
1209
1210 static void
1211 displaced_step_clear_cleanup (void *arg)
1212 {
1213 struct displaced_step_inferior_state *state = arg;
1214
1215 displaced_step_clear (state);
1216 }
1217
1218 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1219 void
1220 displaced_step_dump_bytes (struct ui_file *file,
1221 const gdb_byte *buf,
1222 size_t len)
1223 {
1224 int i;
1225
1226 for (i = 0; i < len; i++)
1227 fprintf_unfiltered (file, "%02x ", buf[i]);
1228 fputs_unfiltered ("\n", file);
1229 }
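/* For example, displaced_step_prepare below uses this to log the scratch
   area contents when debug_displaced is set:

     displaced_step_dump_bytes (gdb_stdlog, displaced->step_saved_copy, len);

   which prints the bytes as space-separated hex followed by a newline.  */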
1230
1231 /* Prepare to single-step, using displaced stepping.
1232
1233 Note that we cannot use displaced stepping when we have a signal to
1234 deliver. If we have a signal to deliver and an instruction to step
1235 over, then after the step, there will be no indication from the
1236 target whether the thread entered a signal handler or ignored the
1237 signal and stepped over the instruction successfully --- both cases
1238 result in a simple SIGTRAP. In the first case we mustn't do a
1239 fixup, and in the second case we must --- but we can't tell which.
1240 Comments in the code for 'random signals' in handle_inferior_event
1241 explain how we handle this case instead.
1242
1243 Returns 1 if preparing was successful -- this thread is going to be
1244 stepped now; or 0 if displaced stepping this thread got queued. */
1245 static int
1246 displaced_step_prepare (ptid_t ptid)
1247 {
1248 struct cleanup *old_cleanups, *ignore_cleanups;
1249 struct regcache *regcache = get_thread_regcache (ptid);
1250 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1251 CORE_ADDR original, copy;
1252 ULONGEST len;
1253 struct displaced_step_closure *closure;
1254 struct displaced_step_inferior_state *displaced;
1255
1256 /* We should never reach this function if the architecture does not
1257 support displaced stepping. */
1258 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1259
1260 /* We have to displaced step one thread at a time, as we only have
1261 access to a single scratch space per inferior. */
1262
1263 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1264
1265 if (!ptid_equal (displaced->step_ptid, null_ptid))
1266 {
1267 /* Already waiting for a displaced step to finish. Defer this
1268 request and place it in the queue. */
1269 struct displaced_step_request *req, *new_req;
1270
1271 if (debug_displaced)
1272 fprintf_unfiltered (gdb_stdlog,
1273 "displaced: defering step of %s\n",
1274 target_pid_to_str (ptid));
1275
1276 new_req = xmalloc (sizeof (*new_req));
1277 new_req->ptid = ptid;
1278 new_req->next = NULL;
1279
1280 if (displaced->step_request_queue)
1281 {
1282 for (req = displaced->step_request_queue;
1283 req && req->next;
1284 req = req->next)
1285 ;
1286 req->next = new_req;
1287 }
1288 else
1289 displaced->step_request_queue = new_req;
1290
1291 return 0;
1292 }
1293 else
1294 {
1295 if (debug_displaced)
1296 fprintf_unfiltered (gdb_stdlog,
1297 "displaced: stepping %s now\n",
1298 target_pid_to_str (ptid));
1299 }
1300
1301 displaced_step_clear (displaced);
1302
1303 old_cleanups = save_inferior_ptid ();
1304 inferior_ptid = ptid;
1305
1306 original = regcache_read_pc (regcache);
1307
1308 copy = gdbarch_displaced_step_location (gdbarch);
1309 len = gdbarch_max_insn_length (gdbarch);
1310
1311 /* Save the original contents of the copy area. */
1312 displaced->step_saved_copy = xmalloc (len);
1313 ignore_cleanups = make_cleanup (free_current_contents,
1314 &displaced->step_saved_copy);
1315 read_memory (copy, displaced->step_saved_copy, len);
1316 if (debug_displaced)
1317 {
1318 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1319 paddress (gdbarch, copy));
1320 displaced_step_dump_bytes (gdb_stdlog,
1321 displaced->step_saved_copy,
1322 len);
1323 }
1324
1325 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1326 original, copy, regcache);
1327
1328 /* We don't support the fully-simulated case at present. */
1329 gdb_assert (closure);
1330
1331 /* Save the information we need to fix things up if the step
1332 succeeds. */
1333 displaced->step_ptid = ptid;
1334 displaced->step_gdbarch = gdbarch;
1335 displaced->step_closure = closure;
1336 displaced->step_original = original;
1337 displaced->step_copy = copy;
1338
1339 make_cleanup (displaced_step_clear_cleanup, displaced);
1340
1341 /* Resume execution at the copy. */
1342 regcache_write_pc (regcache, copy);
1343
1344 discard_cleanups (ignore_cleanups);
1345
1346 do_cleanups (old_cleanups);
1347
1348 if (debug_displaced)
1349 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1350 paddress (gdbarch, copy));
1351
1352 return 1;
1353 }
1354
1355 static void
1356 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1357 const gdb_byte *myaddr, int len)
1358 {
1359 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1360
1361 inferior_ptid = ptid;
1362 write_memory (memaddr, myaddr, len);
1363 do_cleanups (ptid_cleanup);
1364 }
1365
1366 static void
1367 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1368 {
1369 struct cleanup *old_cleanups;
1370 struct displaced_step_inferior_state *displaced
1371 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1372
1373 /* Was any thread of this process doing a displaced step? */
1374 if (displaced == NULL)
1375 return;
1376
1377 /* Was this event for the pid we displaced? */
1378 if (ptid_equal (displaced->step_ptid, null_ptid)
1379 || ! ptid_equal (displaced->step_ptid, event_ptid))
1380 return;
1381
1382 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1383
1384 /* Restore the contents of the copy area. */
1385 {
1386 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1387
1388 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1389 displaced->step_saved_copy, len);
1390 if (debug_displaced)
1391 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1392 paddress (displaced->step_gdbarch,
1393 displaced->step_copy));
1394 }
1395
1396 /* Did the instruction complete successfully? */
1397 if (signal == TARGET_SIGNAL_TRAP)
1398 {
1399 /* Fix up the resulting state. */
1400 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1401 displaced->step_closure,
1402 displaced->step_original,
1403 displaced->step_copy,
1404 get_thread_regcache (displaced->step_ptid));
1405 }
1406 else
1407 {
1408 /* Since the instruction didn't complete, all we can do is
1409 relocate the PC. */
1410 struct regcache *regcache = get_thread_regcache (event_ptid);
1411 CORE_ADDR pc = regcache_read_pc (regcache);
1412
1413 pc = displaced->step_original + (pc - displaced->step_copy);
1414 regcache_write_pc (regcache, pc);
1415 }
1416
1417 do_cleanups (old_cleanups);
1418
1419 displaced->step_ptid = null_ptid;
1420
1421 /* Are there any pending displaced stepping requests? If so, run
1422 one now. Leave the state object around, since we're likely to
1423 need it again soon. */
1424 while (displaced->step_request_queue)
1425 {
1426 struct displaced_step_request *head;
1427 ptid_t ptid;
1428 struct regcache *regcache;
1429 struct gdbarch *gdbarch;
1430 CORE_ADDR actual_pc;
1431 struct address_space *aspace;
1432
1433 head = displaced->step_request_queue;
1434 ptid = head->ptid;
1435 displaced->step_request_queue = head->next;
1436 xfree (head);
1437
1438 context_switch (ptid);
1439
1440 regcache = get_thread_regcache (ptid);
1441 actual_pc = regcache_read_pc (regcache);
1442 aspace = get_regcache_aspace (regcache);
1443
1444 if (breakpoint_here_p (aspace, actual_pc))
1445 {
1446 if (debug_displaced)
1447 fprintf_unfiltered (gdb_stdlog,
1448 "displaced: stepping queued %s now\n",
1449 target_pid_to_str (ptid));
1450
1451 displaced_step_prepare (ptid);
1452
1453 gdbarch = get_regcache_arch (regcache);
1454
1455 if (debug_displaced)
1456 {
1457 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1458 gdb_byte buf[4];
1459
1460 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1461 paddress (gdbarch, actual_pc));
1462 read_memory (actual_pc, buf, sizeof (buf));
1463 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1464 }
1465
1466 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1467 displaced->step_closure))
1468 target_resume (ptid, 1, TARGET_SIGNAL_0);
1469 else
1470 target_resume (ptid, 0, TARGET_SIGNAL_0);
1471
1472 /* Done, we're stepping a thread. */
1473 break;
1474 }
1475 else
1476 {
1477 int step;
1478 struct thread_info *tp = inferior_thread ();
1479
1480 /* The breakpoint we were sitting under has since been
1481 removed. */
1482 tp->control.trap_expected = 0;
1483
1484 /* Go back to what we were trying to do. */
1485 step = currently_stepping (tp);
1486
1487 if (debug_displaced)
1488 fprintf_unfiltered (gdb_stdlog,
1489 "breakpoint is gone %s: step(%d)\n",
1490 target_pid_to_str (tp->ptid), step);
1491
1492 target_resume (ptid, step, TARGET_SIGNAL_0);
1493 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1494
1495 /* This request was discarded. See if there's any other
1496 thread waiting for its turn. */
1497 }
1498 }
1499 }
1500
1501 /* Update global variables holding ptids to hold NEW_PTID if they were
1502 holding OLD_PTID. */
1503 static void
1504 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1505 {
1506 struct displaced_step_request *it;
1507 struct displaced_step_inferior_state *displaced;
1508
1509 if (ptid_equal (inferior_ptid, old_ptid))
1510 inferior_ptid = new_ptid;
1511
1512 if (ptid_equal (singlestep_ptid, old_ptid))
1513 singlestep_ptid = new_ptid;
1514
1515 if (ptid_equal (deferred_step_ptid, old_ptid))
1516 deferred_step_ptid = new_ptid;
1517
1518 for (displaced = displaced_step_inferior_states;
1519 displaced;
1520 displaced = displaced->next)
1521 {
1522 if (ptid_equal (displaced->step_ptid, old_ptid))
1523 displaced->step_ptid = new_ptid;
1524
1525 for (it = displaced->step_request_queue; it; it = it->next)
1526 if (ptid_equal (it->ptid, old_ptid))
1527 it->ptid = new_ptid;
1528 }
1529 }
1530
1531 \f
1532 /* Resuming. */
1533
1534 /* Things to clean up if we QUIT out of resume (). */
1535 static void
1536 resume_cleanups (void *ignore)
1537 {
1538 normal_stop ();
1539 }
1540
1541 static const char schedlock_off[] = "off";
1542 static const char schedlock_on[] = "on";
1543 static const char schedlock_step[] = "step";
1544 static const char *scheduler_enums[] = {
1545 schedlock_off,
1546 schedlock_on,
1547 schedlock_step,
1548 NULL
1549 };
1550 static const char *scheduler_mode = schedlock_off;
1551 static void
1552 show_scheduler_mode (struct ui_file *file, int from_tty,
1553 struct cmd_list_element *c, const char *value)
1554 {
1555 fprintf_filtered (file,
1556 _("Mode for locking scheduler "
1557 "during execution is \"%s\".\n"),
1558 value);
1559 }
1560
1561 static void
1562 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1563 {
1564 if (!target_can_lock_scheduler)
1565 {
1566 scheduler_mode = schedlock_off;
1567 error (_("Target '%s' cannot support this command."), target_shortname);
1568 }
1569 }
1570
1571 /* True if execution commands resume all threads of all processes by
1572 default; otherwise, resume only threads of the current inferior
1573 process. */
1574 int sched_multi = 0;
1575
1576 /* Try to setup for software single stepping over the specified location.
1577 Return 1 if target_resume() should use hardware single step.
1578
1579 GDBARCH the current gdbarch.
1580 PC the location to step over. */
1581
1582 static int
1583 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1584 {
1585 int hw_step = 1;
1586
1587 if (execution_direction == EXEC_FORWARD
1588 && gdbarch_software_single_step_p (gdbarch)
1589 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1590 {
1591 hw_step = 0;
1592 /* Do not pull these breakpoints until after a `wait' in
1593 `wait_for_inferior'. */
1594 singlestep_breakpoints_inserted_p = 1;
1595 singlestep_ptid = inferior_ptid;
1596 singlestep_pc = pc;
1597 }
1598 return hw_step;
1599 }
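/* For example, resume below drives this helper roughly as

     if (step)
       step = maybe_software_singlestep (gdbarch, pc);

   so that when software single-step breakpoints were planted, STEP is
   cleared and the thread is simply continued into them.  */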
1600
1601 /* Return a ptid representing the set of threads that we will proceed,
1602 in the perspective of the user/frontend. We may actually resume
1603 fewer threads at first, e.g., if a thread is stopped at a
1604 breakpoint that needs stepping-off, but that should not be visible
1605 to the user/frontend, and neither should the frontend/user be
1606 allowed to proceed any of the threads that happen to be stopped for
1607 internal run control handling, if a previous command wanted them
1608 resumed. */
1609
1610 ptid_t
1611 user_visible_resume_ptid (int step)
1612 {
1613 /* By default, resume all threads of all processes. */
1614 ptid_t resume_ptid = RESUME_ALL;
1615
1616 /* Maybe resume only all threads of the current process. */
1617 if (!sched_multi && target_supports_multi_process ())
1618 {
1619 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1620 }
1621
1622 /* Maybe resume a single thread after all. */
1623 if (non_stop)
1624 {
1625 /* With non-stop mode on, threads are always handled
1626 individually. */
1627 resume_ptid = inferior_ptid;
1628 }
1629 else if ((scheduler_mode == schedlock_on)
1630 || (scheduler_mode == schedlock_step
1631 && (step || singlestep_breakpoints_inserted_p)))
1632 {
1633 /* User-settable 'scheduler' mode requires solo thread resume. */
1634 resume_ptid = inferior_ptid;
1635 }
1636
1637 return resume_ptid;
1638 }
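/* As a rough usage sketch (the callers are outside this excerpt), a
   run-control command is expected to do something like

     ptid_t resume_ptid = user_visible_resume_ptid (step);
     set_running (resume_ptid, 1);

   so that the frontend sees every thread the user asked to resume as
   running, even if internally only a subset is resumed at first.  */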
1639
1640 /* Resume the inferior, but allow a QUIT. This is useful if the user
1641 wants to interrupt some lengthy single-stepping operation
1642 (for child processes, the SIGINT goes to the inferior, and so
1643 we get a SIGINT random_signal, but for remote debugging and perhaps
1644 other targets, that's not true).
1645
1646 STEP nonzero if we should step (zero to continue instead).
1647 SIG is the signal to give the inferior (zero for none). */
1648 void
1649 resume (int step, enum target_signal sig)
1650 {
1651 int should_resume = 1;
1652 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1653 struct regcache *regcache = get_current_regcache ();
1654 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1655 struct thread_info *tp = inferior_thread ();
1656 CORE_ADDR pc = regcache_read_pc (regcache);
1657 struct address_space *aspace = get_regcache_aspace (regcache);
1658
1659 QUIT;
1660
1661 if (current_inferior ()->waiting_for_vfork_done)
1662 {
1663 /* Don't try to single-step a vfork parent that is waiting for
1664 the child to get out of the shared memory region (by exec'ing
1665 or exiting). This is particularly important on software
1666 single-step archs, as the child process would trip on the
1667 software single step breakpoint inserted for the parent
1668 process. Since the parent will not actually execute any
1669 instruction until the child is out of the shared region (such
1670 are vfork's semantics), it is safe to simply continue it.
1671 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1672 the parent, and tell it to `keep_going', which automatically
1673 re-sets it stepping. */
1674 if (debug_infrun)
1675 fprintf_unfiltered (gdb_stdlog,
1676 "infrun: resume : clear step\n");
1677 step = 0;
1678 }
1679
1680 if (debug_infrun)
1681 fprintf_unfiltered (gdb_stdlog,
1682 "infrun: resume (step=%d, signal=%d), "
1683 "trap_expected=%d, current thread [%s] at %s\n",
1684 step, sig, tp->control.trap_expected,
1685 target_pid_to_str (inferior_ptid),
1686 paddress (gdbarch, pc));
1687
1688 /* Normally, by the time we reach `resume', the breakpoints are either
1689 removed or inserted, as appropriate. The exception is if we're sitting
1690 at a permanent breakpoint; we need to step over it, but permanent
1691 breakpoints can't be removed. So we have to test for it here. */
1692 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1693 {
1694 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1695 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1696 else
1697 error (_("\
1698 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1699 how to step past a permanent breakpoint on this architecture. Try using\n\
1700 a command like `return' or `jump' to continue execution."));
1701 }
1702
1703 /* If enabled, step over breakpoints by executing a copy of the
1704 instruction at a different address.
1705
1706 We can't use displaced stepping when we have a signal to deliver;
1707 the comments for displaced_step_prepare explain why. The
1708 comments in the handle_inferior event for dealing with 'random
1709 signals' explain what we do instead.
1710
1711 We can't use displaced stepping when we are waiting for a vfork_done
1712 event; displaced stepping breaks the vfork child in the same way a
1713 software single-step breakpoint would. */
1714 if (use_displaced_stepping (gdbarch)
1715 && (tp->control.trap_expected
1716 || (step && gdbarch_software_single_step_p (gdbarch)))
1717 && sig == TARGET_SIGNAL_0
1718 && !current_inferior ()->waiting_for_vfork_done)
1719 {
1720 struct displaced_step_inferior_state *displaced;
1721
1722 if (!displaced_step_prepare (inferior_ptid))
1723 {
1724 /* Got placed in displaced stepping queue. Will be resumed
1725 later when all the currently queued displaced stepping
1726 requests finish. The thread is not executing at this point,
1727 and the call to set_executing will be made later. But we
1728 need to call set_running here, since from the frontend's point of
1729 view, the thread is running. */
1730 set_running (inferior_ptid, 1);
1731 discard_cleanups (old_cleanups);
1732 return;
1733 }
1734
1735 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
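/* Ask the architecture's displaced-stepping support whether the
   copied instruction needs to be hardware single-stepped, or whether
   it can simply be continued until it traps. */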
1736 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1737 displaced->step_closure);
1738 }
1739
1740 /* Do we need to do it the hard way, w/temp breakpoints? */
1741 else if (step)
1742 step = maybe_software_singlestep (gdbarch, pc);
1743
1744 /* Currently, our software single-step implementation leads to different
1745 results than hardware single-stepping in one situation: when stepping
1746 into delivering a signal which has an associated signal handler,
1747 hardware single-step will stop at the first instruction of the handler,
1748 while software single-step will simply skip execution of the handler.
1749
1750 For now, this difference in behavior is accepted since there is no
1751 easy way to actually implement single-stepping into a signal handler
1752 without kernel support.
1753
1754 However, there is one scenario where this difference leads to follow-on
1755 problems: if we're stepping off a breakpoint by removing all breakpoints
1756 and then single-stepping. In this case, the software single-step
1757 behavior means that even if there is a *breakpoint* in the signal
1758 handler, GDB still would not stop.
1759
1760 Fortunately, we can at least fix this particular issue. We detect
1761 here the case where we are about to deliver a signal while software
1762 single-stepping with breakpoints removed. In this situation, we
1763 revert the decisions to remove all breakpoints and insert single-
1764 step breakpoints, and instead we install a step-resume breakpoint
1765 at the current address, deliver the signal without stepping, and
1766 once we arrive back at the step-resume breakpoint, actually step
1767 over the breakpoint we originally wanted to step over. */
1768 if (singlestep_breakpoints_inserted_p
1769 && tp->control.trap_expected && sig != TARGET_SIGNAL_0)
1770 {
1771 /* If we have nested signals or a pending signal is delivered
1772 immediately after a handler returns, we might already have
1773 a step-resume breakpoint set on the earlier handler. We cannot
1774 set another step-resume breakpoint; just continue on until the
1775 original breakpoint is hit. */
1776 if (tp->control.step_resume_breakpoint == NULL)
1777 {
1778 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
1779 tp->step_after_step_resume_breakpoint = 1;
1780 }
1781
1782 remove_single_step_breakpoints ();
1783 singlestep_breakpoints_inserted_p = 0;
1784
1785 insert_breakpoints ();
1786 tp->control.trap_expected = 0;
1787 }
1788
1789 if (should_resume)
1790 {
1791 ptid_t resume_ptid;
1792
1793 /* If STEP is set, it's a request to use hardware stepping
1794 facilities. But in that case, we should never
1795 use singlestep breakpoints. */
1796 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1797
1798 /* Decide the set of threads to ask the target to resume. Start
1799 by assuming everything will be resumed, then narrow the set
1800 by applying increasingly restrictive conditions. */
1801 resume_ptid = user_visible_resume_ptid (step);
1802
1803 /* Maybe resume a single thread after all. */
1804 if (singlestep_breakpoints_inserted_p
1805 && stepping_past_singlestep_breakpoint)
1806 {
1807 /* The situation here is as follows. In thread T1 we wanted to
1808 single-step. Lacking hardware single-stepping, we've set a
1809 breakpoint at the PC of the next instruction -- call it
1810 P. After resuming, we've hit that breakpoint in thread T2.
1811 Now we've removed the original breakpoint, inserted a breakpoint
1812 at P+1, and try to step to advance T2 past the breakpoint.
1813 We need to step only T2; if T1 is allowed to run freely,
1814 it can run past P, and if other threads are allowed to run,
1815 they can hit the breakpoint at P+1, and nested hits of single-step
1816 breakpoints are not something we'd want -- that's complicated
1817 to support, and has no value. */
1818 resume_ptid = inferior_ptid;
1819 }
1820 else if ((step || singlestep_breakpoints_inserted_p)
1821 && tp->control.trap_expected)
1822 {
1823 /* We're allowing a thread to run past a breakpoint it has
1824 hit, by single-stepping the thread with the breakpoint
1825 removed. In which case, we need to single-step only this
1826 thread, and keep others stopped, as they can miss this
1827 breakpoint if allowed to run.
1828
1829 The current code actually removes all breakpoints when
1830 doing this, not just the one being stepped over, so if we
1831 let other threads run, we can actually miss any
1832 breakpoint, not just the one at PC. */
1833 resume_ptid = inferior_ptid;
1834 }
1835
1836 if (gdbarch_cannot_step_breakpoint (gdbarch))
1837 {
1838 /* Most targets can step a breakpoint instruction, thus
1839 executing it normally. But if this one cannot, just
1840 continue and we will hit it anyway. */
1841 if (step && breakpoint_inserted_here_p (aspace, pc))
1842 step = 0;
1843 }
1844
1845 if (debug_displaced
1846 && use_displaced_stepping (gdbarch)
1847 && tp->control.trap_expected)
1848 {
1849 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1850 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1851 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1852 gdb_byte buf[4];
1853
1854 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1855 paddress (resume_gdbarch, actual_pc));
1856 read_memory (actual_pc, buf, sizeof (buf));
1857 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1858 }
1859
1860 /* Install inferior's terminal modes. */
1861 target_terminal_inferior ();
1862
1863 /* Avoid confusing the next resume, if the next stop/resume
1864 happens to apply to another thread. */
1865 tp->suspend.stop_signal = TARGET_SIGNAL_0;
1866
1867 /* Advise target which signals may be handled silently. If we have
1868 removed breakpoints because we are stepping over one (which can
1869 happen only if we are not using displaced stepping), we need to
1870 receive all signals to avoid accidentally skipping a breakpoint
1871 during execution of a signal handler. */
1872 if ((step || singlestep_breakpoints_inserted_p)
1873 && tp->control.trap_expected
1874 && !use_displaced_stepping (gdbarch))
1875 target_pass_signals (0, NULL);
1876 else
1877 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
1878
1879 target_resume (resume_ptid, step, sig);
1880 }
1881
1882 discard_cleanups (old_cleanups);
1883 }
1884 \f
1885 /* Proceeding. */
1886
1887 /* Clear out all variables saying what to do when inferior is continued.
1888 First do this, then set the ones you want, then call `proceed'. */
1889
1890 static void
1891 clear_proceed_status_thread (struct thread_info *tp)
1892 {
1893 if (debug_infrun)
1894 fprintf_unfiltered (gdb_stdlog,
1895 "infrun: clear_proceed_status_thread (%s)\n",
1896 target_pid_to_str (tp->ptid));
1897
1898 tp->control.trap_expected = 0;
1899 tp->control.step_range_start = 0;
1900 tp->control.step_range_end = 0;
1901 tp->control.step_frame_id = null_frame_id;
1902 tp->control.step_stack_frame_id = null_frame_id;
1903 tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
1904 tp->stop_requested = 0;
1905
1906 tp->control.stop_step = 0;
1907
1908 tp->control.proceed_to_finish = 0;
1909
1910 /* Discard any remaining commands or status from previous stop. */
1911 bpstat_clear (&tp->control.stop_bpstat);
1912 }
1913
1914 static int
1915 clear_proceed_status_callback (struct thread_info *tp, void *data)
1916 {
1917 if (is_exited (tp->ptid))
1918 return 0;
1919
1920 clear_proceed_status_thread (tp);
1921 return 0;
1922 }
1923
1924 void
1925 clear_proceed_status (void)
1926 {
1927 if (!non_stop)
1928 {
1929 /* In all-stop mode, delete the per-thread status of all
1930 threads, even if inferior_ptid is null_ptid, there may be
1931 threads on the list. E.g., we may be launching a new
1932 process, while selecting the executable. */
1933 iterate_over_threads (clear_proceed_status_callback, NULL);
1934 }
1935
1936 if (!ptid_equal (inferior_ptid, null_ptid))
1937 {
1938 struct inferior *inferior;
1939
1940 if (non_stop)
1941 {
1942 /* If in non-stop mode, only delete the per-thread status of
1943 the current thread. */
1944 clear_proceed_status_thread (inferior_thread ());
1945 }
1946
1947 inferior = current_inferior ();
1948 inferior->control.stop_soon = NO_STOP_QUIETLY;
1949 }
1950
1951 stop_after_trap = 0;
1952
1953 observer_notify_about_to_proceed ();
1954
1955 if (stop_registers)
1956 {
1957 regcache_xfree (stop_registers);
1958 stop_registers = NULL;
1959 }
1960 }
1961
1962 /* Check the current thread against the thread that reported the most recent
1963 event. If a step-over is required return TRUE and set the current thread
1964 to the old thread. Otherwise return FALSE.
1965
1966 This should be suitable for any targets that support threads. */
1967
1968 static int
1969 prepare_to_proceed (int step)
1970 {
1971 ptid_t wait_ptid;
1972 struct target_waitstatus wait_status;
1973 int schedlock_enabled;
1974
1975 /* With non-stop mode on, threads are always handled individually. */
1976 gdb_assert (! non_stop);
1977
1978 /* Get the last target status returned by target_wait(). */
1979 get_last_target_status (&wait_ptid, &wait_status);
1980
1981 /* Make sure we were stopped at a breakpoint. */
1982 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1983 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1984 && wait_status.value.sig != TARGET_SIGNAL_ILL
1985 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1986 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1987 {
1988 return 0;
1989 }
1990
1991 schedlock_enabled = (scheduler_mode == schedlock_on
1992 || (scheduler_mode == schedlock_step
1993 && step));
1994
1995 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1996 if (schedlock_enabled)
1997 return 0;
1998
1999 /* Don't switch over if we're about to resume some process other
2000 than WAIT_PTID's, and schedule-multiple is off. */
2001 if (!sched_multi
2002 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
2003 return 0;
2004
2005 /* Switched over from WAIT_PTID. */
2006 if (!ptid_equal (wait_ptid, minus_one_ptid)
2007 && !ptid_equal (inferior_ptid, wait_ptid))
2008 {
2009 struct regcache *regcache = get_thread_regcache (wait_ptid);
2010
2011 if (breakpoint_here_p (get_regcache_aspace (regcache),
2012 regcache_read_pc (regcache)))
2013 {
2014 /* If stepping, remember current thread to switch back to. */
2015 if (step)
2016 deferred_step_ptid = inferior_ptid;
2017
2018 /* Switch back to the WAIT_PTID thread. */
2019 switch_to_thread (wait_ptid);
2020
2021 if (debug_infrun)
2022 fprintf_unfiltered (gdb_stdlog,
2023 "infrun: prepare_to_proceed (step=%d), "
2024 "switched to [%s]\n",
2025 step, target_pid_to_str (inferior_ptid));
2026
2027 /* We return 1 to indicate that there is a breakpoint here,
2028 so we need to step over it before continuing to avoid
2029 hitting it straight away. */
2030 return 1;
2031 }
2032 }
2033
2034 return 0;
2035 }
2036
2037 /* Basic routine for continuing the program in various fashions.
2038
2039 ADDR is the address to resume at, or -1 for resume where stopped.
2040 SIGGNAL is the signal to give it, or 0 for none,
2041 or TARGET_SIGNAL_DEFAULT to act according to how it stopped.
2042 STEP is nonzero if we should trap after one instruction.
2043 -1 means return after that and print nothing.
2044 You should probably set various step_... variables
2045 before calling here, if you are stepping.
2046
2047 You should call clear_proceed_status before calling proceed. */
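/* For example, a typical caller follows roughly this pattern (a
   sketch of the usual calling sequence, not copied from any specific
   command implementation):

     clear_proceed_status ();
     ... set any step_... variables on the current thread ...
     proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);  */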
2048
2049 void
2050 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
2051 {
2052 struct regcache *regcache;
2053 struct gdbarch *gdbarch;
2054 struct thread_info *tp;
2055 CORE_ADDR pc;
2056 struct address_space *aspace;
2057 int oneproc = 0;
2058
2059 /* If we're stopped at a fork/vfork, follow the branch set by the
2060 "set follow-fork-mode" command; otherwise, we'll just proceed
2061 resuming the current thread. */
2062 if (!follow_fork ())
2063 {
2064 /* The target for some reason decided not to resume. */
2065 normal_stop ();
2066 if (target_can_async_p ())
2067 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2068 return;
2069 }
2070
2071 /* We'll update this if & when we switch to a new thread. */
2072 previous_inferior_ptid = inferior_ptid;
2073
2074 regcache = get_current_regcache ();
2075 gdbarch = get_regcache_arch (regcache);
2076 aspace = get_regcache_aspace (regcache);
2077 pc = regcache_read_pc (regcache);
2078
2079 if (step > 0)
2080 step_start_function = find_pc_function (pc);
2081 if (step < 0)
2082 stop_after_trap = 1;
2083
2084 if (addr == (CORE_ADDR) -1)
2085 {
2086 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
2087 && execution_direction != EXEC_REVERSE)
2088 /* There is a breakpoint at the address we will resume at,
2089 step one instruction before inserting breakpoints so that
2090 we do not stop right away (and report a second hit at this
2091 breakpoint).
2092
2093 Note, we don't do this in reverse, because we won't
2094 actually be executing the breakpoint insn anyway.
2095 We'll be (un-)executing the previous instruction. */
2096
2097 oneproc = 1;
2098 else if (gdbarch_single_step_through_delay_p (gdbarch)
2099 && gdbarch_single_step_through_delay (gdbarch,
2100 get_current_frame ()))
2101 /* We stepped onto an instruction that needs to be stepped
2102 again before re-inserting the breakpoint; do so. */
2103 oneproc = 1;
2104 }
2105 else
2106 {
2107 regcache_write_pc (regcache, addr);
2108 }
2109
2110 if (debug_infrun)
2111 fprintf_unfiltered (gdb_stdlog,
2112 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
2113 paddress (gdbarch, addr), siggnal, step);
2114
2115 if (non_stop)
2116 /* In non-stop, each thread is handled individually. The context
2117 must already be set to the right thread here. */
2118 ;
2119 else
2120 {
2121 /* In a multi-threaded task we may select another thread and
2122 then continue or step.
2123
2124 But if the old thread was stopped at a breakpoint, it will
2125 immediately cause another breakpoint stop without any
2126 execution (i.e. it will report a breakpoint hit incorrectly).
2127 So we must step over it first.
2128
2129 prepare_to_proceed checks the current thread against the
2130 thread that reported the most recent event. If a step-over
2131 is required it returns TRUE and sets the current thread to
2132 the old thread. */
2133 if (prepare_to_proceed (step))
2134 oneproc = 1;
2135 }
2136
2137 /* prepare_to_proceed may change the current thread. */
2138 tp = inferior_thread ();
2139
2140 if (oneproc)
2141 {
2142 tp->control.trap_expected = 1;
2143 /* If displaced stepping is enabled, we can step over the
2144 breakpoint without hitting it, so leave all breakpoints
2145 inserted. Otherwise we need to disable all breakpoints, step
2146 one instruction, and then re-add them when that step is
2147 finished. */
2148 if (!use_displaced_stepping (gdbarch))
2149 remove_breakpoints ();
2150 }
2151
2152 /* We can insert breakpoints if we're not trying to step over one,
2153 or if we are stepping over one but we're using displaced stepping
2154 to do so. */
2155 if (! tp->control.trap_expected || use_displaced_stepping (gdbarch))
2156 insert_breakpoints ();
2157
2158 if (!non_stop)
2159 {
2160 /* Pass the last stop signal to the thread we're resuming,
2161 irrespective of whether the current thread is the thread that
2162 got the last event or not. This was historically GDB's
2163 behaviour before keeping a stop_signal per thread. */
2164
2165 struct thread_info *last_thread;
2166 ptid_t last_ptid;
2167 struct target_waitstatus last_status;
2168
2169 get_last_target_status (&last_ptid, &last_status);
2170 if (!ptid_equal (inferior_ptid, last_ptid)
2171 && !ptid_equal (last_ptid, null_ptid)
2172 && !ptid_equal (last_ptid, minus_one_ptid))
2173 {
2174 last_thread = find_thread_ptid (last_ptid);
2175 if (last_thread)
2176 {
2177 tp->suspend.stop_signal = last_thread->suspend.stop_signal;
2178 last_thread->suspend.stop_signal = TARGET_SIGNAL_0;
2179 }
2180 }
2181 }
2182
2183 if (siggnal != TARGET_SIGNAL_DEFAULT)
2184 tp->suspend.stop_signal = siggnal;
2185 /* If this signal should not be seen by program,
2186 give it zero. Used for debugging signals. */
2187 else if (!signal_program[tp->suspend.stop_signal])
2188 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2189
2190 annotate_starting ();
2191
2192 /* Make sure that output from GDB appears before output from the
2193 inferior. */
2194 gdb_flush (gdb_stdout);
2195
2196 /* Refresh prev_pc value just prior to resuming. This used to be
2197 done in stop_stepping, however, setting prev_pc there did not handle
2198 scenarios such as inferior function calls or returning from
2199 a function via the return command. In those cases, the prev_pc
2200 value was not set properly for subsequent commands. The prev_pc value
2201 is used to initialize the starting line number in the ecs. With an
2202 invalid value, the gdb next command ends up stopping at the position
2203 represented by the next line table entry past our start position.
2204 On platforms that generate one line table entry per line, this
2205 is not a problem. However, on the ia64, the compiler generates
2206 extraneous line table entries that do not increase the line number.
2207 When we issue the gdb next command on the ia64 after an inferior call
2208 or a return command, we often end up a few instructions forward, still
2209 within the original line we started.
2210
2211 An attempt was made to refresh the prev_pc at the same time the
2212 execution_control_state is initialized (for instance, just before
2213 waiting for an inferior event). But this approach did not work
2214 because of platforms that use ptrace, where the pc register cannot
2215 be read unless the inferior is stopped. At that point, we are not
2216 guaranteed the inferior is stopped and so the regcache_read_pc() call
2217 can fail. Setting the prev_pc value here ensures the value is updated
2218 correctly when the inferior is stopped. */
2219 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2220
2221 /* Fill in with reasonable starting values. */
2222 init_thread_stepping_state (tp);
2223
2224 /* Reset to normal state. */
2225 init_infwait_state ();
2226
2227 /* Resume inferior. */
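/* Single-step if we need to step off a breakpoint (ONEPROC), the
   user asked to step, or a software watchpoint requires
   single-stepping (bpstat_should_step). */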
2228 resume (oneproc || step || bpstat_should_step (), tp->suspend.stop_signal);
2229
2230 /* Wait for it to stop (if not standalone)
2231 and in any case decode why it stopped, and act accordingly. */
2232 /* Do this only if we are not using the event loop, or if the target
2233 does not support asynchronous execution. */
2234 if (!target_can_async_p ())
2235 {
2236 wait_for_inferior ();
2237 normal_stop ();
2238 }
2239 }
2240 \f
2241
2242 /* Start remote-debugging of a machine over a serial link. */
2243
2244 void
2245 start_remote (int from_tty)
2246 {
2247 struct inferior *inferior;
2248
2249 init_wait_for_inferior ();
2250 inferior = current_inferior ();
2251 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2252
2253 /* Always go on waiting for the target, regardless of the mode. */
2254 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2255 indicate to wait_for_inferior that a target should timeout if
2256 nothing is returned (instead of just blocking). Because of this,
2257 targets expecting an immediate response need to, internally, set
2258 things up so that the target_wait() is forced to eventually
2259 timeout. */
2260 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2261 differentiate to its caller what the state of the target is after
2262 the initial open has been performed. Here we're assuming that
2263 the target has stopped. It should be possible to eventually have
2264 target_open() return to the caller an indication that the target
2265 is currently running and GDB state should be set to the same as
2266 for an async run. */
2267 wait_for_inferior ();
2268
2269 /* Now that the inferior has stopped, do any bookkeeping like
2270 loading shared libraries. We want to do this before normal_stop,
2271 so that the displayed frame is up to date. */
2272 post_create_inferior (&current_target, from_tty);
2273
2274 normal_stop ();
2275 }
2276
2277 /* Initialize static vars when a new inferior begins. */
2278
2279 void
2280 init_wait_for_inferior (void)
2281 {
2282 /* These are meaningless until the first time through wait_for_inferior. */
2283
2284 breakpoint_init_inferior (inf_starting);
2285
2286 clear_proceed_status ();
2287
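/* No stepping-past-a-single-step-breakpoint operation or deferred
   step is in progress for the new inferior. */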
2288 stepping_past_singlestep_breakpoint = 0;
2289 deferred_step_ptid = null_ptid;
2290
2291 target_last_wait_ptid = minus_one_ptid;
2292
2293 previous_inferior_ptid = inferior_ptid;
2294 init_infwait_state ();
2295
2296 /* Discard any skipped inlined frames. */
2297 clear_inline_frame_state (minus_one_ptid);
2298 }
2299
2300 \f
2301 /* This enum encodes possible reasons for doing a target_wait, so that
2302 wfi can call target_wait in one place. (Ultimately the call will be
2303 moved out of the infinite loop entirely.) */
2304
2305 enum infwait_states
2306 {
2307 infwait_normal_state,
2308 infwait_thread_hop_state,
2309 infwait_step_watch_state,
2310 infwait_nonstep_watch_state
2311 };
2312
2313 /* The PTID we'll do a target_wait on. */
2314 ptid_t waiton_ptid;
2315
2316 /* Current inferior wait state. */
2317 enum infwait_states infwait_state;
2318
2319 /* Data to be passed around while handling an event. This data is
2320 discarded between events. */
2321 struct execution_control_state
2322 {
2323 ptid_t ptid;
2324 /* The thread that got the event, if this was a thread event; NULL
2325 otherwise. */
2326 struct thread_info *event_thread;
2327
2328 struct target_waitstatus ws;
2329 int random_signal;
2330 CORE_ADDR stop_func_start;
2331 CORE_ADDR stop_func_end;
2332 char *stop_func_name;
2333 int new_thread_event;
2334 int wait_some_more;
2335 };
2336
2337 static void handle_inferior_event (struct execution_control_state *ecs);
2338
2339 static void handle_step_into_function (struct gdbarch *gdbarch,
2340 struct execution_control_state *ecs);
2341 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2342 struct execution_control_state *ecs);
2343 static void check_exception_resume (struct execution_control_state *,
2344 struct frame_info *, struct symbol *);
2345
2346 static void stop_stepping (struct execution_control_state *ecs);
2347 static void prepare_to_wait (struct execution_control_state *ecs);
2348 static void keep_going (struct execution_control_state *ecs);
2349
2350 /* Callback for iterate_over_threads. If the thread is stopped, but
2351 the user/frontend doesn't know about that yet, go through
2352 normal_stop, as if the thread had just stopped now. ARG points at
2353 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2354 ptid_is_pid(PTID) is true, applies to all threads of the process
2355 pointed at by PTID. Otherwise, apply only to the thread pointed at by
2356 PTID. */
2357
2358 static int
2359 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2360 {
2361 ptid_t ptid = * (ptid_t *) arg;
2362
2363 if ((ptid_equal (info->ptid, ptid)
2364 || ptid_equal (minus_one_ptid, ptid)
2365 || (ptid_is_pid (ptid)
2366 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2367 && is_running (info->ptid)
2368 && !is_executing (info->ptid))
2369 {
2370 struct cleanup *old_chain;
2371 struct execution_control_state ecss;
2372 struct execution_control_state *ecs = &ecss;
2373
2374 memset (ecs, 0, sizeof (*ecs));
2375
2376 old_chain = make_cleanup_restore_current_thread ();
2377
2378 switch_to_thread (info->ptid);
2379
2380 /* Go through handle_inferior_event/normal_stop, so we always
2381 have consistent output as if the stop event had been
2382 reported. */
2383 ecs->ptid = info->ptid;
2384 ecs->event_thread = find_thread_ptid (info->ptid);
2385 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2386 ecs->ws.value.sig = TARGET_SIGNAL_0;
2387
2388 handle_inferior_event (ecs);
2389
2390 if (!ecs->wait_some_more)
2391 {
2392 struct thread_info *tp;
2393
2394 normal_stop ();
2395
2396 /* Finish off the continuations. */
2397 tp = inferior_thread ();
2398 do_all_intermediate_continuations_thread (tp, 1);
2399 do_all_continuations_thread (tp, 1);
2400 }
2401
2402 do_cleanups (old_chain);
2403 }
2404
2405 return 0;
2406 }
2407
2408 /* This function is attached as a "thread_stop_requested" observer.
2409 Cleanup local state that assumed the PTID was to be resumed, and
2410 report the stop to the frontend. */
2411
2412 static void
2413 infrun_thread_stop_requested (ptid_t ptid)
2414 {
2415 struct displaced_step_inferior_state *displaced;
2416
2417 /* PTID was requested to stop. Remove it from the displaced
2418 stepping queue, so we don't try to resume it automatically. */
2419
2420 for (displaced = displaced_step_inferior_states;
2421 displaced;
2422 displaced = displaced->next)
2423 {
2424 struct displaced_step_request *it, **prev_next_p;
2425
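/* Walk the queue keeping a pointer to the previous element's `next'
   field, so matching requests can be unlinked in place. */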
2426 it = displaced->step_request_queue;
2427 prev_next_p = &displaced->step_request_queue;
2428 while (it)
2429 {
2430 if (ptid_match (it->ptid, ptid))
2431 {
2432 *prev_next_p = it->next;
2433 it->next = NULL;
2434 xfree (it);
2435 }
2436 else
2437 {
2438 prev_next_p = &it->next;
2439 }
2440
2441 it = *prev_next_p;
2442 }
2443 }
2444
2445 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2446 }
2447
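/* Callback attached as a thread-exit observer: if the exiting thread
   was the one that reported the last wait event, forget that cached
   ptid. */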
2448 static void
2449 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2450 {
2451 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2452 nullify_last_target_wait_ptid ();
2453 }
2454
2455 /* Callback for iterate_over_threads. */
2456
2457 static int
2458 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2459 {
2460 if (is_exited (info->ptid))
2461 return 0;
2462
2463 delete_step_resume_breakpoint (info);
2464 delete_exception_resume_breakpoint (info);
2465 return 0;
2466 }
2467
2468 /* In all-stop, delete the step resume breakpoint of any thread that
2469 had one. In non-stop, delete the step resume breakpoint of the
2470 thread that just stopped. */
2471
2472 static void
2473 delete_step_thread_step_resume_breakpoint (void)
2474 {
2475 if (!target_has_execution
2476 || ptid_equal (inferior_ptid, null_ptid))
2477 /* If the inferior has exited, we have already deleted the step
2478 resume breakpoints out of GDB's lists. */
2479 return;
2480
2481 if (non_stop)
2482 {
2483 /* If in non-stop mode, only delete the step-resume or
2484 longjmp-resume breakpoint of the thread that just stopped
2485 stepping. */
2486 struct thread_info *tp = inferior_thread ();
2487
2488 delete_step_resume_breakpoint (tp);
2489 delete_exception_resume_breakpoint (tp);
2490 }
2491 else
2492 /* In all-stop mode, delete all step-resume and longjmp-resume
2493 breakpoints of any thread that had them. */
2494 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2495 }
2496
2497 /* A cleanup wrapper. */
2498
2499 static void
2500 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2501 {
2502 delete_step_thread_step_resume_breakpoint ();
2503 }
2504
2505 /* Pretty print the results of target_wait, for debugging purposes. */
2506
2507 static void
2508 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2509 const struct target_waitstatus *ws)
2510 {
2511 char *status_string = target_waitstatus_to_string (ws);
2512 struct ui_file *tmp_stream = mem_fileopen ();
2513 char *text;
2514
2515 /* The text is split over several lines because it was getting too long.
2516 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2517 output as a unit; we want only one timestamp printed if debug_timestamp
2518 is set. */
2519
2520 fprintf_unfiltered (tmp_stream,
2521 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2522 if (PIDGET (waiton_ptid) != -1)
2523 fprintf_unfiltered (tmp_stream,
2524 " [%s]", target_pid_to_str (waiton_ptid));
2525 fprintf_unfiltered (tmp_stream, ", status) =\n");
2526 fprintf_unfiltered (tmp_stream,
2527 "infrun: %d [%s],\n",
2528 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2529 fprintf_unfiltered (tmp_stream,
2530 "infrun: %s\n",
2531 status_string);
2532
2533 text = ui_file_xstrdup (tmp_stream, NULL);
2534
2535 /* This uses %s in part to handle %'s in the text, but also to avoid
2536 a gcc error: the format attribute requires a string literal. */
2537 fprintf_unfiltered (gdb_stdlog, "%s", text);
2538
2539 xfree (status_string);
2540 xfree (text);
2541 ui_file_delete (tmp_stream);
2542 }
2543
2544 /* Prepare and stabilize the inferior for detaching it. E.g.,
2545 detaching while a thread is displaced stepping is a recipe for
2546 crashing it, as nothing would readjust the PC out of the scratch
2547 pad. */
2548
2549 void
2550 prepare_for_detach (void)
2551 {
2552 struct inferior *inf = current_inferior ();
2553 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2554 struct cleanup *old_chain_1;
2555 struct displaced_step_inferior_state *displaced;
2556
2557 displaced = get_displaced_stepping_state (inf->pid);
2558
2559 /* Is any thread of this process displaced stepping? If not,
2560 there's nothing else to do. */
2561 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2562 return;
2563
2564 if (debug_infrun)
2565 fprintf_unfiltered (gdb_stdlog,
2566 "displaced-stepping in-process while detaching");
2567
2568 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2569 inf->detaching = 1;
2570
2571 while (!ptid_equal (displaced->step_ptid, null_ptid))
2572 {
2573 struct cleanup *old_chain_2;
2574 struct execution_control_state ecss;
2575 struct execution_control_state *ecs;
2576
2577 ecs = &ecss;
2578 memset (ecs, 0, sizeof (*ecs));
2579
2580 overlay_cache_invalid = 1;
2581
2582 /* We have to invalidate the registers BEFORE calling
2583 target_wait because they can be loaded from the target while
2584 in target_wait. This makes remote debugging a bit more
2585 efficient for those targets that provide critical registers
2586 as part of their normal status mechanism. */
2587
2588 registers_changed ();
2589
2590 if (deprecated_target_wait_hook)
2591 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2592 else
2593 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2594
2595 if (debug_infrun)
2596 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2597
2598 /* If an error happens while handling the event, propagate GDB's
2599 knowledge of the executing state to the frontend/user running
2600 state. */
2601 old_chain_2 = make_cleanup (finish_thread_state_cleanup,
2602 &minus_one_ptid);
2603
2604 /* In non-stop mode, each thread is handled individually.
2605 Switch early, so the global state is set correctly for this
2606 thread. */
2607 if (non_stop
2608 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2609 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2610 context_switch (ecs->ptid);
2611
2612 /* Now figure out what to do with the result of the wait. */
2613 handle_inferior_event (ecs);
2614
2615 /* No error, don't finish the state yet. */
2616 discard_cleanups (old_chain_2);
2617
2618 /* Breakpoints and watchpoints are not installed on the target
2619 at this point, and signals are passed directly to the
2620 inferior, so this must mean the process is gone. */
2621 if (!ecs->wait_some_more)
2622 {
2623 discard_cleanups (old_chain_1);
2624 error (_("Program exited while detaching"));
2625 }
2626 }
2627
2628 discard_cleanups (old_chain_1);
2629 }
2630
2631 /* Wait for control to return from inferior to debugger.
2632
2633 If the inferior gets a signal, we may decide to start it up again
2634 instead of returning. That is why there is a loop in this function.
2635 When this function actually returns it means the inferior
2636 should be left stopped and GDB should read more commands. */
2637
2638 void
2639 wait_for_inferior (void)
2640 {
2641 struct cleanup *old_cleanups;
2642 struct execution_control_state ecss;
2643 struct execution_control_state *ecs;
2644
2645 if (debug_infrun)
2646 fprintf_unfiltered
2647 (gdb_stdlog, "infrun: wait_for_inferior ()\n");
2648
2649 old_cleanups =
2650 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2651
2652 ecs = &ecss;
2653 memset (ecs, 0, sizeof (*ecs));
2654
2655 while (1)
2656 {
2657 struct cleanup *old_chain;
2658
2659 /* We have to invalidate the registers BEFORE calling target_wait
2660 because they can be loaded from the target while in target_wait.
2661 This makes remote debugging a bit more efficient for those
2662 targets that provide critical registers as part of their normal
2663 status mechanism. */
2664
2665 overlay_cache_invalid = 1;
2666 registers_changed ();
2667
2668 if (deprecated_target_wait_hook)
2669 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2670 else
2671 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2672
2673 if (debug_infrun)
2674 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2675
2676 /* If an error happens while handling the event, propagate GDB's
2677 knowledge of the executing state to the frontend/user running
2678 state. */
2679 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2680
2681 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2682 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2683 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2684
2685 /* Now figure out what to do with the result of the wait. */
2686 handle_inferior_event (ecs);
2687
2688 /* No error, don't finish the state yet. */
2689 discard_cleanups (old_chain);
2690
2691 if (!ecs->wait_some_more)
2692 break;
2693 }
2694
2695 do_cleanups (old_cleanups);
2696 }
2697
2698 /* Asynchronous version of wait_for_inferior. It is called by the
2699 event loop whenever a change of state is detected on the file
2700 descriptor corresponding to the target. It can be called more than
2701 once to complete a single execution command. In such cases we need
2702 to keep the state in a global variable ECSS. If it is the last time
2703 that this function is called for a single execution command, then
2704 report to the user that the inferior has stopped, and do the
2705 necessary cleanups. */
2706
2707 void
2708 fetch_inferior_event (void *client_data)
2709 {
2710 struct execution_control_state ecss;
2711 struct execution_control_state *ecs = &ecss;
2712 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2713 struct cleanup *ts_old_chain;
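/* Remember whether execution was synchronous on entry, so we know
   whether to redisplay the prompt at the end. */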
2714 int was_sync = sync_execution;
2715
2716 memset (ecs, 0, sizeof (*ecs));
2717
2718 /* We're handling a live event, so make sure we're doing live
2719 debugging. If we're looking at traceframes while the target is
2720 running, we're going to need to get back to that mode after
2721 handling the event. */
2722 if (non_stop)
2723 {
2724 make_cleanup_restore_current_traceframe ();
2725 set_current_traceframe (-1);
2726 }
2727
2728 if (non_stop)
2729 /* In non-stop mode, the user/frontend should not notice a thread
2730 switch due to internal events. Make sure we reverse to the
2731 user selected thread and frame after handling the event and
2732 running any breakpoint commands. */
2733 make_cleanup_restore_current_thread ();
2734
2735 /* We have to invalidate the registers BEFORE calling target_wait
2736 because they can be loaded from the target while in target_wait.
2737 This makes remote debugging a bit more efficient for those
2738 targets that provide critical registers as part of their normal
2739 status mechanism. */
2740
2741 overlay_cache_invalid = 1;
2742
2743 /* But don't do it if the current thread is already stopped (hence
2744 this is either a delayed event that will result in
2745 TARGET_WAITKIND_IGNORE, or it's an event for another thread (and
2746 we always clear the register and frame caches when the user
2747 switches threads anyway). If we didn't do this, a spurious
2748 delayed event in all-stop mode would make the user lose the
2749 selected frame. */
2750 if (non_stop || is_executing (inferior_ptid))
2751 registers_changed ();
2752
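/* Handle the event using the execution direction the target reports
   (e.g., reverse while replaying a recording); the cleanup restores
   the previous direction afterwards. */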
2753 make_cleanup_restore_integer (&execution_direction);
2754 execution_direction = target_execution_direction ();
2755
2756 if (deprecated_target_wait_hook)
2757 ecs->ptid =
2758 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2759 else
2760 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2761
2762 if (debug_infrun)
2763 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2764
2765 if (non_stop
2766 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2767 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2768 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2769 /* In non-stop mode, each thread is handled individually. Switch
2770 early, so the global state is set correctly for this
2771 thread. */
2772 context_switch (ecs->ptid);
2773
2774 /* If an error happens while handling the event, propagate GDB's
2775 knowledge of the executing state to the frontend/user running
2776 state. */
2777 if (!non_stop)
2778 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2779 else
2780 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2781
2782 /* Now figure out what to do with the result of the wait. */
2783 handle_inferior_event (ecs);
2784
2785 if (!ecs->wait_some_more)
2786 {
2787 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2788
2789 delete_step_thread_step_resume_breakpoint ();
2790
2791 /* We may not find an inferior if this was a process exit. */
2792 if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
2793 normal_stop ();
2794
2795 if (target_has_execution
2796 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2797 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2798 && ecs->event_thread->step_multi
2799 && ecs->event_thread->control.stop_step)
2800 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2801 else
2802 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2803 }
2804
2805 /* No error, don't finish the thread states yet. */
2806 discard_cleanups (ts_old_chain);
2807
2808 /* Revert thread and frame. */
2809 do_cleanups (old_chain);
2810
2811 /* If the inferior was in sync execution mode, and now isn't,
2812 restore the prompt. */
2813 if (was_sync && !sync_execution)
2814 display_gdb_prompt (0);
2815 }
2816
2817 /* Record the frame and location we're currently stepping through. */
2818 void
2819 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2820 {
2821 struct thread_info *tp = inferior_thread ();
2822
2823 tp->control.step_frame_id = get_frame_id (frame);
2824 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
2825
2826 tp->current_symtab = sal.symtab;
2827 tp->current_line = sal.line;
2828 }
2829
2830 /* Clear context switchable stepping state. */
2831
2832 void
2833 init_thread_stepping_state (struct thread_info *tss)
2834 {
2835 tss->stepping_over_breakpoint = 0;
2836 tss->step_after_step_resume_breakpoint = 0;
2837 tss->stepping_through_solib_after_catch = 0;
2838 tss->stepping_through_solib_catchpoints = NULL;
2839 }
2840
2841 /* Return the cached copy of the last pid/waitstatus returned by
2842 target_wait()/deprecated_target_wait_hook(). The data is actually
2843 cached by handle_inferior_event(), which gets called immediately
2844 after target_wait()/deprecated_target_wait_hook(). */
2845
2846 void
2847 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2848 {
2849 *ptidp = target_last_wait_ptid;
2850 *status = target_last_waitstatus;
2851 }
2852
2853 void
2854 nullify_last_target_wait_ptid (void)
2855 {
2856 target_last_wait_ptid = minus_one_ptid;
2857 }
2858
2859 /* Switch thread contexts. */
2860
2861 static void
2862 context_switch (ptid_t ptid)
2863 {
2864 if (debug_infrun)
2865 {
2866 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2867 target_pid_to_str (inferior_ptid));
2868 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2869 target_pid_to_str (ptid));
2870 }
2871
2872 switch_to_thread (ptid);
2873 }
2874
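/* If the event in ECS was a SIGTRAP caused by a software breakpoint,
   back up the reported PC by gdbarch_decr_pc_after_break so it points
   at the breakpoint address; see the comments below for the cases
   where no adjustment is done. */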
2875 static void
2876 adjust_pc_after_break (struct execution_control_state *ecs)
2877 {
2878 struct regcache *regcache;
2879 struct gdbarch *gdbarch;
2880 struct address_space *aspace;
2881 CORE_ADDR breakpoint_pc;
2882
2883 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2884 we aren't, just return.
2885
2886 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2887 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2888 implemented by software breakpoints should be handled through the normal
2889 breakpoint layer.
2890
2891 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2892 different signals (SIGILL or SIGEMT for instance), but it is less
2893 clear where the PC is pointing afterwards. It may not match
2894 gdbarch_decr_pc_after_break. I don't know any specific target that
2895 generates these signals at breakpoints (the code has been in GDB since at
2896 least 1992) so I cannot guess how to handle them here.
2897
2898 In earlier versions of GDB, a target with
2899 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2900 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2901 target with both of these set in GDB history, and it seems unlikely to be
2902 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2903
2904 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2905 return;
2906
2907 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2908 return;
2909
2910 /* In reverse execution, when a breakpoint is hit, the instruction
2911 under it has already been de-executed. The reported PC always
2912 points at the breakpoint address, so adjusting it further would
2913 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2914 architecture:
2915
2916 B1 0x08000000 : INSN1
2917 B2 0x08000001 : INSN2
2918 0x08000002 : INSN3
2919 PC -> 0x08000003 : INSN4
2920
2921 Say you're stopped at 0x08000003 as above. Reverse continuing
2922 from that point should hit B2 as below. Reading the PC when the
2923 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2924 been de-executed already.
2925
2926 B1 0x08000000 : INSN1
2927 B2 PC -> 0x08000001 : INSN2
2928 0x08000002 : INSN3
2929 0x08000003 : INSN4
2930
2931 We can't apply the same logic as for forward execution, because
2932 we would wrongly adjust the PC to 0x08000000, since there's a
2933 breakpoint at PC - 1. We'd then report a hit on B1, although
2934 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2935 behaviour. */
2936 if (execution_direction == EXEC_REVERSE)
2937 return;
2938
2939 /* If this target does not decrement the PC after breakpoints, then
2940 we have nothing to do. */
2941 regcache = get_thread_regcache (ecs->ptid);
2942 gdbarch = get_regcache_arch (regcache);
2943 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2944 return;
2945
2946 aspace = get_regcache_aspace (regcache);
2947
2948 /* Find the location where (if we've hit a breakpoint) the
2949 breakpoint would be. */
2950 breakpoint_pc = regcache_read_pc (regcache)
2951 - gdbarch_decr_pc_after_break (gdbarch);
2952
2953 /* Check whether there actually is a software breakpoint inserted at
2954 that location.
2955
2956 If in non-stop mode, a race condition is possible where we've
2957 removed a breakpoint, but stop events for that breakpoint were
2958 already queued and arrive later. To suppress those spurious
2959 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2960 and retire them after a number of stop events are reported. */
2961 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2962 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2963 {
2964 struct cleanup *old_cleanups = NULL;
2965
2966 if (RECORD_IS_USED)
2967 old_cleanups = record_gdb_operation_disable_set ();
2968
2969 /* When using hardware single-step, a SIGTRAP is reported for both
2970 a completed single-step and a software breakpoint. Need to
2971 differentiate between the two, as the latter needs adjusting
2972 but the former does not.
2973
2974 The SIGTRAP can be due to a completed hardware single-step only if
2975 - we didn't insert software single-step breakpoints
2976 - the thread to be examined is still the current thread
2977 - this thread is currently being stepped
2978
2979 If any of these events did not occur, we must have stopped due
2980 to hitting a software breakpoint, and have to back up to the
2981 breakpoint address.
2982
2983 As a special case, we could have hardware single-stepped a
2984 software breakpoint. In this case (prev_pc == breakpoint_pc),
2985 we also need to back up to the breakpoint address. */
2986
2987 if (singlestep_breakpoints_inserted_p
2988 || !ptid_equal (ecs->ptid, inferior_ptid)
2989 || !currently_stepping (ecs->event_thread)
2990 || ecs->event_thread->prev_pc == breakpoint_pc)
2991 regcache_write_pc (regcache, breakpoint_pc);
2992
2993 if (RECORD_IS_USED)
2994 do_cleanups (old_cleanups);
2995 }
2996 }
2997
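/* Reset the inferior-wait state machine: normal state, waiting on
   any thread. */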
2998 void
2999 init_infwait_state (void)
3000 {
3001 waiton_ptid = pid_to_ptid (-1);
3002 infwait_state = infwait_normal_state;
3003 }
3004
3005 void
3006 error_is_running (void)
3007 {
3008 error (_("Cannot execute this command while "
3009 "the selected thread is running."));
3010 }
3011
3012 void
3013 ensure_not_running (void)
3014 {
3015 if (is_running (inferior_ptid))
3016 error_is_running ();
3017 }
3018
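/* Return non-zero if the frame with id STEP_FRAME_ID is among
   FRAME's callers, looking only through inline frames -- i.e., FRAME
   appears to have been stepped into from that frame. */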
3019 static int
3020 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3021 {
3022 for (frame = get_prev_frame (frame);
3023 frame != NULL;
3024 frame = get_prev_frame (frame))
3025 {
3026 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3027 return 1;
3028 if (get_frame_type (frame) != INLINE_FRAME)
3029 break;
3030 }
3031
3032 return 0;
3033 }
3034
3035 /* Auxiliary function that handles syscall entry/return events.
3036 It returns 1 if the inferior should keep going (and GDB
3037 should ignore the event), or 0 if the event deserves to be
3038 processed. */
3039
3040 static int
3041 handle_syscall_event (struct execution_control_state *ecs)
3042 {
3043 struct regcache *regcache;
3044 struct gdbarch *gdbarch;
3045 int syscall_number;
3046
3047 if (!ptid_equal (ecs->ptid, inferior_ptid))
3048 context_switch (ecs->ptid);
3049
3050 regcache = get_thread_regcache (ecs->ptid);
3051 gdbarch = get_regcache_arch (regcache);
3052 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
3053 stop_pc = regcache_read_pc (regcache);
3054
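/* Record the syscall number in the cached last waitstatus, where
   later consumers (e.g., syscall catchpoint printing) can find it. */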
3055 target_last_waitstatus.value.syscall_number = syscall_number;
3056
3057 if (catch_syscall_enabled () > 0
3058 && catching_syscall_number (syscall_number) > 0)
3059 {
3060 if (debug_infrun)
3061 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3062 syscall_number);
3063
3064 ecs->event_thread->control.stop_bpstat
3065 = bpstat_stop_status (get_regcache_aspace (regcache),
3066 stop_pc, ecs->ptid);
3067 ecs->random_signal
3068 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3069
3070 if (!ecs->random_signal)
3071 {
3072 /* Catchpoint hit. */
3073 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3074 return 0;
3075 }
3076 }
3077
3078 /* If no catchpoint triggered for this, then keep going. */
3079 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3080 keep_going (ecs);
3081 return 1;
3082 }
3083
3084 /* Given an execution control state that has been freshly filled in
3085 by an event from the inferior, figure out what it means and take
3086 appropriate action. */
3087
3088 static void
3089 handle_inferior_event (struct execution_control_state *ecs)
3090 {
3091 struct frame_info *frame;
3092 struct gdbarch *gdbarch;
3093 int sw_single_step_trap_p = 0;
3094 int stopped_by_watchpoint;
3095 int stepped_after_stopped_by_watchpoint = 0;
3096 struct symtab_and_line stop_pc_sal;
3097 enum stop_kind stop_soon;
3098
3099 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
3100 {
3101 /* We had an event in the inferior, but we are not interested in
3102 handling it at this level. The lower layers have already
3103 done what needs to be done, if anything.
3104
3105 One of the possible circumstances for this is when the
3106 inferior produces output for the console. The inferior has
3107 not stopped, and we are ignoring the event. Another possible
3108 circumstance is any event which the lower level knows will be
3109 reported multiple times without an intervening resume. */
3110 if (debug_infrun)
3111 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
3112 prepare_to_wait (ecs);
3113 return;
3114 }
3115
3116 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3117 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
3118 {
3119 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3120
3121 gdb_assert (inf);
3122 stop_soon = inf->control.stop_soon;
3123 }
3124 else
3125 stop_soon = NO_STOP_QUIETLY;
3126
3127 /* Cache the last pid/waitstatus. */
3128 target_last_wait_ptid = ecs->ptid;
3129 target_last_waitstatus = ecs->ws;
3130
3131 /* Always clear state belonging to the previous time we stopped. */
3132 stop_stack_dummy = STOP_NONE;
3133
3134 /* If it's a new process, add it to the thread database. */
3135
3136 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
3137 && !ptid_equal (ecs->ptid, minus_one_ptid)
3138 && !in_thread_list (ecs->ptid));
3139
3140 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3141 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
3142 add_thread (ecs->ptid);
3143
3144 ecs->event_thread = find_thread_ptid (ecs->ptid);
3145
3146 /* Dependent on valid ECS->EVENT_THREAD. */
3147 adjust_pc_after_break (ecs);
3148
3149 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3150 reinit_frame_cache ();
3151
3152 breakpoint_retire_moribund ();
3153
3154 /* First, distinguish signals caused by the debugger from signals
3155 that have to do with the program's own actions. Note that
3156 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3157 on the operating system version. Here we detect when a SIGILL or
3158 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3159 something similar for SIGSEGV, since a SIGSEGV will be generated
3160 when we're trying to execute a breakpoint instruction on a
3161 non-executable stack. This happens for call dummy breakpoints
3162 for architectures like SPARC that place call dummies on the
3163 stack. */
3164 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3165 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3166 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3167 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3168 {
3169 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3170
3171 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3172 regcache_read_pc (regcache)))
3173 {
3174 if (debug_infrun)
3175 fprintf_unfiltered (gdb_stdlog,
3176 "infrun: Treating signal as SIGTRAP\n");
3177 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3178 }
3179 }
3180
3181 /* Mark the non-executing threads accordingly. In all-stop, all
3182 threads of all processes are stopped when we get any event
3183 reported. In non-stop mode, only the event thread stops. If
3184 we're handling a process exit in non-stop mode, there's nothing
3185 to do, as threads of the dead process are gone, and threads of
3186 any other process were left running. */
3187 if (!non_stop)
3188 set_executing (minus_one_ptid, 0);
3189 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3190 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3191 set_executing (inferior_ptid, 0);
3192
3193 switch (infwait_state)
3194 {
3195 case infwait_thread_hop_state:
3196 if (debug_infrun)
3197 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3198 break;
3199
3200 case infwait_normal_state:
3201 if (debug_infrun)
3202 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3203 break;
3204
3205 case infwait_step_watch_state:
3206 if (debug_infrun)
3207 fprintf_unfiltered (gdb_stdlog,
3208 "infrun: infwait_step_watch_state\n");
3209
3210 stepped_after_stopped_by_watchpoint = 1;
3211 break;
3212
3213 case infwait_nonstep_watch_state:
3214 if (debug_infrun)
3215 fprintf_unfiltered (gdb_stdlog,
3216 "infrun: infwait_nonstep_watch_state\n");
3217 insert_breakpoints ();
3218
3219 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3220 handle things like signals arriving and other things happening
3221 in combination correctly? */
3222 stepped_after_stopped_by_watchpoint = 1;
3223 break;
3224
3225 default:
3226 internal_error (__FILE__, __LINE__, _("bad switch"));
3227 }
3228
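/* We've acted on the INFWAIT_STATE recorded above; reset it and the
   wait ptid to their defaults for the next wait. */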
3229 infwait_state = infwait_normal_state;
3230 waiton_ptid = pid_to_ptid (-1);
3231
3232 switch (ecs->ws.kind)
3233 {
3234 case TARGET_WAITKIND_LOADED:
3235 if (debug_infrun)
3236 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3237 /* Ignore gracefully during startup of the inferior, as it might
3238 be the shell which has just loaded some objects; otherwise,
3239 add the symbols for the newly loaded objects. Also ignore at
3240 the beginning of an attach or remote session; we will query
3241 the full list of libraries once the connection is
3242 established. */
3243 if (stop_soon == NO_STOP_QUIETLY)
3244 {
3245 /* Check for any newly added shared libraries if we're
3246 supposed to be adding them automatically. Switch
3247 terminal for any messages produced by
3248 breakpoint_re_set. */
3249 target_terminal_ours_for_output ();
3250 /* NOTE: cagney/2003-11-25: Make certain that the target
3251 stack's section table is kept up-to-date. Architectures,
3252 (e.g., PPC64), use the section table to perform
3253 operations such as address => section name and hence
3254 require the table to contain all sections (including
3255 those found in shared libraries). */
3256 #ifdef SOLIB_ADD
3257 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3258 #else
3259 solib_add (NULL, 0, &current_target, auto_solib_add);
3260 #endif
3261 target_terminal_inferior ();
3262
3263 /* If requested, stop when the dynamic linker notifies
3264 gdb of events. This allows the user to get control
3265 and place breakpoints in initializer routines for
3266 dynamically loaded objects (among other things). */
3267 if (stop_on_solib_events)
3268 {
3269 /* Make sure we print "Stopped due to solib-event" in
3270 normal_stop. */
3271 stop_print_frame = 1;
3272
3273 stop_stepping (ecs);
3274 return;
3275 }
3276
3277 /* NOTE drow/2007-05-11: This might be a good place to check
3278 for "catch load". */
3279 }
3280
3281 /* If we are skipping through a shell, or through shared library
3282 loading that we aren't interested in, resume the program. If
3283 we're running the program normally, also resume. But stop if
3284 we're attaching or setting up a remote connection. */
3285 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3286 {
3287 /* Loading of shared libraries might have changed breakpoint
3288 addresses. Make sure new breakpoints are inserted. */
3289 if (stop_soon == NO_STOP_QUIETLY
3290 && !breakpoints_always_inserted_mode ())
3291 insert_breakpoints ();
3292 resume (0, TARGET_SIGNAL_0);
3293 prepare_to_wait (ecs);
3294 return;
3295 }
3296
3297 break;
3298
3299 case TARGET_WAITKIND_SPURIOUS:
3300 if (debug_infrun)
3301 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3302 resume (0, TARGET_SIGNAL_0);
3303 prepare_to_wait (ecs);
3304 return;
3305
3306 case TARGET_WAITKIND_EXITED:
3307 if (debug_infrun)
3308 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3309 inferior_ptid = ecs->ptid;
3310 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3311 set_current_program_space (current_inferior ()->pspace);
3312 handle_vfork_child_exec_or_exit (0);
3313 target_terminal_ours (); /* Must do this before mourn anyway. */
3314 print_exited_reason (ecs->ws.value.integer);
3315
3316 /* Record the exit code in the convenience variable $_exitcode, so
3317 that the user can inspect this again later. */
3318 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3319 (LONGEST) ecs->ws.value.integer);
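      /* For example, once the program has exited the user can inspect
         the value with "print $_exitcode" or test it from a command
         script.  */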
3320
3321 /* Also record this in the inferior itself. */
3322 current_inferior ()->has_exit_code = 1;
3323 current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;
3324
3325 gdb_flush (gdb_stdout);
3326 target_mourn_inferior ();
3327 singlestep_breakpoints_inserted_p = 0;
3328 cancel_single_step_breakpoints ();
3329 stop_print_frame = 0;
3330 stop_stepping (ecs);
3331 return;
3332
3333 case TARGET_WAITKIND_SIGNALLED:
3334 if (debug_infrun)
3335 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3336 inferior_ptid = ecs->ptid;
3337 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3338 set_current_program_space (current_inferior ()->pspace);
3339 handle_vfork_child_exec_or_exit (0);
3340 stop_print_frame = 0;
3341 target_terminal_ours (); /* Must do this before mourn anyway. */
3342
3343 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3344 reach here unless the inferior is dead. However, for years
3345 target_kill() was called here, which hints that fatal signals aren't
3346 really fatal on some systems. If that's true, then some changes
3347 may be needed. */
3348 target_mourn_inferior ();
3349
3350 print_signal_exited_reason (ecs->ws.value.sig);
3351 singlestep_breakpoints_inserted_p = 0;
3352 cancel_single_step_breakpoints ();
3353 stop_stepping (ecs);
3354 return;
3355
3356 /* The following are the only cases in which we keep going;
3357 the above cases end in a continue or goto. */
3358 case TARGET_WAITKIND_FORKED:
3359 case TARGET_WAITKIND_VFORKED:
3360 if (debug_infrun)
3361 fprintf_unfiltered (gdb_stdlog, (ecs->ws.kind == TARGET_WAITKIND_FORKED ? "infrun: TARGET_WAITKIND_FORKED\n" : "infrun: TARGET_WAITKIND_VFORKED\n"));
3362
3363 if (!ptid_equal (ecs->ptid, inferior_ptid))
3364 {
3365 context_switch (ecs->ptid);
3366 reinit_frame_cache ();
3367 }
3368
3369 /* Immediately detach breakpoints from the child before there's
3370 any chance of letting the user delete breakpoints from the
3371 breakpoint lists. If we don't do this early, it's easy to
3372 leave left-over traps in the child, viz: "break foo; catch
3373 fork; c; <fork>; del; c; <child calls foo>". We only follow
3374 the fork on the last `continue', and by that time the
3375 breakpoint at "foo" is long gone from the breakpoint table.
3376 If we vforked, then we don't need to unpatch here, since both
3377 parent and child are sharing the same memory pages; we'll
3378 need to unpatch at follow/detach time instead to be certain
3379 that new breakpoints added between catchpoint hit time and
3380 vfork follow are detached. */
3381 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3382 {
3383 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3384
3385 /* This won't actually modify the breakpoint list, but will
3386 physically remove the breakpoints from the child. */
3387 detach_breakpoints (child_pid);
3388 }
3389
3390 if (singlestep_breakpoints_inserted_p)
3391 {
3392 /* Pull the single step breakpoints out of the target. */
3393 remove_single_step_breakpoints ();
3394 singlestep_breakpoints_inserted_p = 0;
3395 }
3396
3397 /* In case the event is caught by a catchpoint, remember that
3398 the event is to be followed at the next resume of the thread,
3399 and not immediately. */
3400 ecs->event_thread->pending_follow = ecs->ws;
3401
3402 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3403
3404 ecs->event_thread->control.stop_bpstat
3405 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3406 stop_pc, ecs->ptid);
3407
3408 /* Note that we're interested in knowing whether the bpstat
3409 actually causes a stop, not just whether it may explain the signal.
3410 Software watchpoints, for example, always appear in the
3411 bpstat. */
3412 ecs->random_signal
3413 = !bpstat_causes_stop (ecs->event_thread->control.stop_bpstat);
3414
3415 /* If no catchpoint triggered for this, then keep going. */
3416 if (ecs->random_signal)
3417 {
3418 ptid_t parent;
3419 ptid_t child;
3420 int should_resume;
3421 int follow_child
3422 = (follow_fork_mode_string == follow_fork_mode_child);
3423
3424 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3425
3426 should_resume = follow_fork ();
3427
3428 parent = ecs->ptid;
3429 child = ecs->ws.value.related_pid;
3430
3431 /* In non-stop mode, also resume the other branch. */
3432 if (non_stop && !detach_fork)
3433 {
3434 if (follow_child)
3435 switch_to_thread (parent);
3436 else
3437 switch_to_thread (child);
3438
3439 ecs->event_thread = inferior_thread ();
3440 ecs->ptid = inferior_ptid;
3441 keep_going (ecs);
3442 }
3443
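          /* Now make the branch we decided to follow the current
             thread, so that the keep_going/stop_stepping below acts
             on it.  */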
3444 if (follow_child)
3445 switch_to_thread (child);
3446 else
3447 switch_to_thread (parent);
3448
3449 ecs->event_thread = inferior_thread ();
3450 ecs->ptid = inferior_ptid;
3451
3452 if (should_resume)
3453 keep_going (ecs);
3454 else
3455 stop_stepping (ecs);
3456 return;
3457 }
3458 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3459 goto process_event_stop_test;
3460
3461 case TARGET_WAITKIND_VFORK_DONE:
3462 /* Done with the shared memory region. Re-insert breakpoints in
3463 the parent, and keep going. */
3464
3465 if (debug_infrun)
3466 fprintf_unfiltered (gdb_stdlog,
3467 "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3468
3469 if (!ptid_equal (ecs->ptid, inferior_ptid))
3470 context_switch (ecs->ptid);
3471
3472 current_inferior ()->waiting_for_vfork_done = 0;
3473 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3474 /* This also takes care of reinserting breakpoints in the
3475 previously locked inferior. */
3476 keep_going (ecs);
3477 return;
3478
3479 case TARGET_WAITKIND_EXECD:
3480 if (debug_infrun)
3481 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3482
3483 if (!ptid_equal (ecs->ptid, inferior_ptid))
3484 {
3485 context_switch (ecs->ptid);
3486 reinit_frame_cache ();
3487 }
3488
3489 singlestep_breakpoints_inserted_p = 0;
3490 cancel_single_step_breakpoints ();
3491
3492 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3493
3494 /* Do whatever is necessary to the parent branch of the vfork. */
3495 handle_vfork_child_exec_or_exit (1);
3496
3497 /* This causes the eventpoints and symbol table to be reset.
3498 Must do this now, before trying to determine whether to
3499 stop. */
3500 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3501
3502 ecs->event_thread->control.stop_bpstat
3503 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3504 stop_pc, ecs->ptid);
3505 ecs->random_signal
3506 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat);
3507
3508 /* Note that this may be referenced from inside
3509 bpstat_stop_status above, through inferior_has_execd. */
3510 xfree (ecs->ws.value.execd_pathname);
3511 ecs->ws.value.execd_pathname = NULL;
3512
3513 /* If no catchpoint triggered for this, then keep going. */
3514 if (ecs->random_signal)
3515 {
3516 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3517 keep_going (ecs);
3518 return;
3519 }
3520 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
3521 goto process_event_stop_test;
3522
3523 /* Be careful not to try to gather much state about a thread
3524 that's in a syscall. It's frequently a losing proposition. */
3525 case TARGET_WAITKIND_SYSCALL_ENTRY:
3526 if (debug_infrun)
3527 fprintf_unfiltered (gdb_stdlog,
3528 "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3529 /* Getting the current syscall number. */
3530 if (handle_syscall_event (ecs) != 0)
3531 return;
3532 goto process_event_stop_test;
3533
3534 /* Before examining the threads further, step this thread to
3535 get it entirely out of the syscall. (We get notice of the
3536 event when the thread is just on the verge of exiting a
3537 syscall. Stepping one instruction seems to get it back
3538 into user code.) */
3539 case TARGET_WAITKIND_SYSCALL_RETURN:
3540 if (debug_infrun)
3541 fprintf_unfiltered (gdb_stdlog,
3542 "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3543 if (handle_syscall_event (ecs) != 0)
3544 return;
3545 goto process_event_stop_test;
3546
3547 case TARGET_WAITKIND_STOPPED:
3548 if (debug_infrun)
3549 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3550 ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
3551 break;
3552
3553 case TARGET_WAITKIND_NO_HISTORY:
3554 /* Reverse execution: target ran out of history info. */
3555 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3556 print_no_history_reason ();
3557 stop_stepping (ecs);
3558 return;
3559 }
3560
3561 if (ecs->new_thread_event)
3562 {
3563 if (non_stop)
3564 /* Non-stop assumes that the target handles adding new threads
3565 to the thread list. */
3566 internal_error (__FILE__, __LINE__,
3567 "targets should add new threads to the thread "
3568 "list themselves in non-stop mode.");
3569
3570 /* We may want to consider not doing a resume here in order to
3571 give the user a chance to play with the new thread. It might
3572 be good to make that a user-settable option. */
3573
3574 /* At this point, all threads are stopped (happens automatically
3575 in either the OS or the native code). Therefore we need to
3576 continue all threads in order to make progress. */
3577
3578 if (!ptid_equal (ecs->ptid, inferior_ptid))
3579 context_switch (ecs->ptid);
3580 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3581 prepare_to_wait (ecs);
3582 return;
3583 }
3584
3585 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3586 {
3587 /* Do we need to clean up the state of a thread that has
3588 completed a displaced single-step? (Doing so usually affects
3589 the PC, so do it here, before we set stop_pc.) */
3590 displaced_step_fixup (ecs->ptid,
3591 ecs->event_thread->suspend.stop_signal);
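      /* (Displaced stepping executes a copy of the instruction from a
         scratch buffer; the fixup above adjusts the PC and any other
         affected state as if the instruction had run in place.)  */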
3592
3593 /* If we either finished a single-step or hit a breakpoint, but
3594 the user wanted this thread to be stopped, pretend we got a
3595 SIG0 (generic unsignaled stop). */
3596
3597 if (ecs->event_thread->stop_requested
3598 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3599 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3600 }
3601
3602 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3603
3604 if (debug_infrun)
3605 {
3606 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3607 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3608 struct cleanup *old_chain = save_inferior_ptid ();
3609
3610 inferior_ptid = ecs->ptid;
3611
3612 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3613 paddress (gdbarch, stop_pc));
3614 if (target_stopped_by_watchpoint ())
3615 {
3616 CORE_ADDR addr;
3617
3618 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3619
3620 if (target_stopped_data_address (&current_target, &addr))
3621 fprintf_unfiltered (gdb_stdlog,
3622 "infrun: stopped data address = %s\n",
3623 paddress (gdbarch, addr));
3624 else
3625 fprintf_unfiltered (gdb_stdlog,
3626 "infrun: (no data address available)\n");
3627 }
3628
3629 do_cleanups (old_chain);
3630 }
3631
3632 if (stepping_past_singlestep_breakpoint)
3633 {
3634 gdb_assert (singlestep_breakpoints_inserted_p);
3635 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3636 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3637
3638 stepping_past_singlestep_breakpoint = 0;
3639
3640 /* We've either finished single-stepping past the single-step
3641 breakpoint, or stopped for some other reason. It would be nice if
3642 we could tell, but we can't reliably. */
3643 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3644 {
3645 if (debug_infrun)
3646 fprintf_unfiltered (gdb_stdlog,
3647 "infrun: stepping_past_"
3648 "singlestep_breakpoint\n");
3649 /* Pull the single step breakpoints out of the target. */
3650 remove_single_step_breakpoints ();
3651 singlestep_breakpoints_inserted_p = 0;
3652
3653 ecs->random_signal = 0;
3654 ecs->event_thread->control.trap_expected = 0;
3655
3656 context_switch (saved_singlestep_ptid);
3657 if (deprecated_context_hook)
3658 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3659
3660 resume (1, TARGET_SIGNAL_0);
3661 prepare_to_wait (ecs);
3662 return;
3663 }
3664 }
3665
3666 if (!ptid_equal (deferred_step_ptid, null_ptid))
3667 {
3668 /* In non-stop mode, there's never a deferred_step_ptid set. */
3669 gdb_assert (!non_stop);
3670
3671 /* If we stopped for some other reason than single-stepping, ignore
3672 the fact that we were supposed to switch back. */
3673 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3674 {
3675 if (debug_infrun)
3676 fprintf_unfiltered (gdb_stdlog,
3677 "infrun: handling deferred step\n");
3678
3679 /* Pull the single step breakpoints out of the target. */
3680 if (singlestep_breakpoints_inserted_p)
3681 {
3682 remove_single_step_breakpoints ();
3683 singlestep_breakpoints_inserted_p = 0;
3684 }
3685
3686 ecs->event_thread->control.trap_expected = 0;
3687
3688 /* Note: We do not call context_switch at this point, as the
3689 context is already set up for stepping the original thread. */
3690 switch_to_thread (deferred_step_ptid);
3691 deferred_step_ptid = null_ptid;
3692 /* Suppress spurious "Switching to ..." message. */
3693 previous_inferior_ptid = inferior_ptid;
3694
3695 resume (1, TARGET_SIGNAL_0);
3696 prepare_to_wait (ecs);
3697 return;
3698 }
3699
3700 deferred_step_ptid = null_ptid;
3701 }
3702
3703 /* See if a thread hit a thread-specific breakpoint that was meant for
3704 another thread. If so, then step that thread past the breakpoint,
3705 and continue it. */
3706
3707 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
3708 {
3709 int thread_hop_needed = 0;
3710 struct address_space *aspace =
3711 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3712
3713 /* Check if a regular breakpoint has been hit before checking
3714 for a potential single step breakpoint. Otherwise, GDB will
3715 not see this breakpoint hit when stepping onto breakpoints. */
3716 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3717 {
3718 ecs->random_signal = 0;
3719 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3720 thread_hop_needed = 1;
3721 }
3722 else if (singlestep_breakpoints_inserted_p)
3723 {
3724 /* We have not context switched yet, so this should be true
3725 no matter which thread hit the singlestep breakpoint. */
3726 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3727 if (debug_infrun)
3728 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3729 "trap for %s\n",
3730 target_pid_to_str (ecs->ptid));
3731
3732 ecs->random_signal = 0;
3733 /* The call to in_thread_list is necessary because PTIDs sometimes
3734 change when we go from single-threaded to multi-threaded. If
3735 the singlestep_ptid is still in the list, assume that it is
3736 really different from ecs->ptid. */
3737 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3738 && in_thread_list (singlestep_ptid))
3739 {
3740 /* If the PC of the thread we were trying to single-step
3741 has changed, discard this event (which we were going
3742 to ignore anyway), and pretend we saw that thread
3743 trap. This prevents us continuously moving the
3744 single-step breakpoint forward, one instruction at a
3745 time. If the PC has changed, then the thread we were
3746 trying to single-step has trapped or been signalled,
3747 but the event has not been reported to GDB yet.
3748
3749 There might be some cases where this loses signal
3750 information, if a signal has arrived at exactly the
3751 same time that the PC changed, but this is the best
3752 we can do with the information available. Perhaps we
3753 should arrange to report all events for all threads
3754 when they stop, or to re-poll the remote looking for
3755 this particular thread (i.e. temporarily enable
3756 schedlock). */
3757
3758 CORE_ADDR new_singlestep_pc
3759 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3760
3761 if (new_singlestep_pc != singlestep_pc)
3762 {
3763 enum target_signal stop_signal;
3764
3765 if (debug_infrun)
3766 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3767 " but expected thread advanced also\n");
3768
3769 /* The current context still belongs to
3770 singlestep_ptid. Don't swap here, since that's
3771 the context we want to use. Just fudge our
3772 state and continue. */
3773 stop_signal = ecs->event_thread->suspend.stop_signal;
3774 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
3775 ecs->ptid = singlestep_ptid;
3776 ecs->event_thread = find_thread_ptid (ecs->ptid);
3777 ecs->event_thread->suspend.stop_signal = stop_signal;
3778 stop_pc = new_singlestep_pc;
3779 }
3780 else
3781 {
3782 if (debug_infrun)
3783 fprintf_unfiltered (gdb_stdlog,
3784 "infrun: unexpected thread\n");
3785
3786 thread_hop_needed = 1;
3787 stepping_past_singlestep_breakpoint = 1;
3788 saved_singlestep_ptid = singlestep_ptid;
3789 }
3790 }
3791 }
3792
3793 if (thread_hop_needed)
3794 {
3795 struct regcache *thread_regcache;
3796 int remove_status = 0;
3797
3798 if (debug_infrun)
3799 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3800
3801 /* Switch context before touching inferior memory, the
3802 previous thread may have exited. */
3803 if (!ptid_equal (inferior_ptid, ecs->ptid))
3804 context_switch (ecs->ptid);
3805
3806 /* Saw a breakpoint, but it was hit by the wrong thread.
3807 Just continue. */
3808
3809 if (singlestep_breakpoints_inserted_p)
3810 {
3811 /* Pull the single step breakpoints out of the target. */
3812 remove_single_step_breakpoints ();
3813 singlestep_breakpoints_inserted_p = 0;
3814 }
3815
3816 /* If the arch can displace step, don't remove the
3817 breakpoints. */
3818 thread_regcache = get_thread_regcache (ecs->ptid);
3819 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3820 remove_status = remove_breakpoints ();
3821
3822 /* Did we fail to remove breakpoints? If so, try
3823 to set the PC past the bp. (There's at least
3824 one situation in which we can fail to remove
3825 the bp's: On HP-UX's that use ttrace, we can't
3826 change the address space of a vforking child
3827 process until the child exits (well, okay, not
3828 then either :-) or execs.)  */
3829 if (remove_status != 0)
3830 error (_("Cannot step over breakpoint hit in wrong thread"));
3831 else
3832 { /* Single step */
3833 if (!non_stop)
3834 {
3835 /* Only need to require the next event from this
3836 thread in all-stop mode. */
3837 waiton_ptid = ecs->ptid;
3838 infwait_state = infwait_thread_hop_state;
3839 }
3840
3841 ecs->event_thread->stepping_over_breakpoint = 1;
3842 keep_going (ecs);
3843 return;
3844 }
3845 }
3846 else if (singlestep_breakpoints_inserted_p)
3847 {
3848 sw_single_step_trap_p = 1;
3849 ecs->random_signal = 0;
3850 }
3851 }
3852 else
3853 ecs->random_signal = 1;
3854
3855 /* See if something interesting happened to the non-current thread. If
3856 so, then switch to that thread. */
3857 if (!ptid_equal (ecs->ptid, inferior_ptid))
3858 {
3859 if (debug_infrun)
3860 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3861
3862 context_switch (ecs->ptid);
3863
3864 if (deprecated_context_hook)
3865 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3866 }
3867
3868 /* At this point, get hold of the now-current thread's frame. */
3869 frame = get_current_frame ();
3870 gdbarch = get_frame_arch (frame);
3871
3872 if (singlestep_breakpoints_inserted_p)
3873 {
3874 /* Pull the single step breakpoints out of the target. */
3875 remove_single_step_breakpoints ();
3876 singlestep_breakpoints_inserted_p = 0;
3877 }
3878
3879 if (stepped_after_stopped_by_watchpoint)
3880 stopped_by_watchpoint = 0;
3881 else
3882 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3883
3884 /* If necessary, step over this watchpoint. We'll be back to display
3885 it in a moment. */
3886 if (stopped_by_watchpoint
3887 && (target_have_steppable_watchpoint
3888 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3889 {
3890 /* At this point, we are stopped at an instruction which has
3891 attempted to write to a piece of memory under control of
3892 a watchpoint. The instruction hasn't actually executed
3893 yet. If we were to evaluate the watchpoint expression
3894 now, we would get the old value, and therefore no change
3895 would seem to have occurred.
3896
3897 In order to make watchpoints work `right', we really need
3898 to complete the memory write, and then evaluate the
3899 watchpoint expression. We do this by single-stepping the
3900 target.
3901
3902 It may not be necessary to disable the watchpoint to step over
3903 it. For example, the PA can (with some kernel cooperation)
3904 single step over a watchpoint without disabling the watchpoint.
3905
3906 It is far more common to need to disable a watchpoint to step
3907 the inferior over it. If we have non-steppable watchpoints,
3908 we must disable the current watchpoint; it's simplest to
3909 disable all watchpoints and breakpoints. */
3910 int hw_step = 1;
3911
3912 if (!target_have_steppable_watchpoint)
3913 {
3914 remove_breakpoints ();
3915 /* See comment in resume why we need to stop bypassing signals
3916 while breakpoints have been removed. */
3917 target_pass_signals (0, NULL);
3918 }
3919 /* Single step */
3920 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3921 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3922 waiton_ptid = ecs->ptid;
3923 if (target_have_steppable_watchpoint)
3924 infwait_state = infwait_step_watch_state;
3925 else
3926 infwait_state = infwait_nonstep_watch_state;
3927 prepare_to_wait (ecs);
3928 return;
3929 }
3930
3931 ecs->stop_func_start = 0;
3932 ecs->stop_func_end = 0;
3933 ecs->stop_func_name = 0;
3934 /* Don't care about return value; stop_func_start and stop_func_name
3935 will both be 0 if it doesn't work. */
3936 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3937 &ecs->stop_func_start, &ecs->stop_func_end);
3938 ecs->stop_func_start
3939 += gdbarch_deprecated_function_start_offset (gdbarch);
3940 ecs->event_thread->stepping_over_breakpoint = 0;
3941 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
3942 ecs->event_thread->control.stop_step = 0;
3943 stop_print_frame = 1;
3944 ecs->random_signal = 0;
3945 stopped_by_random_signal = 0;
3946
3947 /* Hide inlined functions starting here, unless we just performed stepi or
3948 nexti. After stepi and nexti, always show the innermost frame (not any
3949 inline function call sites). */
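  /* (A step_range_end of exactly 1 is how stepi/nexti is encoded; see
     the "stepi or nexti" check near the end of this function.)  */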
3950 if (ecs->event_thread->control.step_range_end != 1)
3951 skip_inline_frames (ecs->ptid);
3952
3953 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
3954 && ecs->event_thread->control.trap_expected
3955 && gdbarch_single_step_through_delay_p (gdbarch)
3956 && currently_stepping (ecs->event_thread))
3957 {
3958 /* We're trying to step off a breakpoint. Turns out that we're
3959 also on an instruction that needs to be stepped multiple
3960 times before it has been fully executed. E.g., architectures
3961 with a delay slot. It needs to be stepped twice, once for
3962 the instruction and once for the delay slot. */
3963 int step_through_delay
3964 = gdbarch_single_step_through_delay (gdbarch, frame);
3965
3966 if (debug_infrun && step_through_delay)
3967 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3968 if (ecs->event_thread->control.step_range_end == 0
3969 && step_through_delay)
3970 {
3971 /* The user issued a continue when stopped at a breakpoint.
3972 Set up for another trap and get out of here. */
3973 ecs->event_thread->stepping_over_breakpoint = 1;
3974 keep_going (ecs);
3975 return;
3976 }
3977 else if (step_through_delay)
3978 {
3979 /* The user issued a step when stopped at a breakpoint.
3980 Maybe we should stop, maybe we should not - the delay
3981 slot *might* correspond to a line of source. In any
3982 case, don't decide that here, just set
3983 ecs->stepping_over_breakpoint, making sure we
3984 single-step again before breakpoints are re-inserted. */
3985 ecs->event_thread->stepping_over_breakpoint = 1;
3986 }
3987 }
3988
3989 /* Look at the cause of the stop, and decide what to do.
3990 The alternatives are:
3991 1) stop_stepping and return; to really stop and return to the debugger,
3992 2) keep_going and return to start up again
3993 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3994 3) set ecs->random_signal to 1, and the decision between 1 and 2
3995 will be made according to the signal handling tables. */
3996
3997 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
3998 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3999 || stop_soon == STOP_QUIETLY_REMOTE)
4000 {
4001 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4002 && stop_after_trap)
4003 {
4004 if (debug_infrun)
4005 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4006 stop_print_frame = 0;
4007 stop_stepping (ecs);
4008 return;
4009 }
4010
4011 /* This originates from start_remote(), start_inferior() and
4012 the shared library hook functions. */
4013 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4014 {
4015 if (debug_infrun)
4016 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4017 stop_stepping (ecs);
4018 return;
4019 }
4020
4021 /* This originates from attach_command(). We need to overwrite
4022 the stop_signal here, because some kernels don't ignore a
4023 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4024 See more comments in inferior.h. On the other hand, if we
4025 get a non-SIGSTOP, report it to the user - assume the backend
4026 will handle the SIGSTOP if it should show up later.
4027
4028 Also consider that the attach is complete when we see a
4029 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4030 target extended-remote, report it instead of a SIGSTOP
4031 (e.g. gdbserver). We already rely on SIGTRAP being our
4032 signal, so this is no exception.
4033
4034 Also consider that the attach is complete when we see a
4035 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4036 the target to stop all threads of the inferior, in case the
4037 low level attach operation doesn't stop them implicitly. If
4038 they weren't stopped implicitly, then the stub will report a
4039 TARGET_SIGNAL_0, meaning: stopped for no particular reason
4040 other than GDB's request. */
4041 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4042 && (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_STOP
4043 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4044 || ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_0))
4045 {
4046 stop_stepping (ecs);
4047 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
4048 return;
4049 }
4050
4051 /* See if there is a breakpoint at the current PC. */
4052 ecs->event_thread->control.stop_bpstat
4053 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4054 stop_pc, ecs->ptid);
4055
4056 /* The following is needed in case the break condition called a
4057 function. */
4058 stop_print_frame = 1;
4059
4060 /* This is where we handle "moribund" watchpoints. Unlike
4061 software breakpoints traps, hardware watchpoint traps are
4062 always distinguishable from random traps. If no high-level
4063 watchpoint is associated with the reported stop data address
4064 anymore, then the bpstat does not explain the signal ---
4065 simply make sure to ignore it if `stopped_by_watchpoint' is
4066 set. */
4067
4068 if (debug_infrun
4069 && ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
4070 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4071 && stopped_by_watchpoint)
4072 fprintf_unfiltered (gdb_stdlog,
4073 "infrun: no user watchpoint explains "
4074 "watchpoint SIGTRAP, ignoring\n");
4075
4076 /* NOTE: cagney/2003-03-29: These two checks for a random signal
4077 at one stage in the past included checks for an inferior
4078 function call's call dummy's return breakpoint. The original
4079 comment, that went with the test, read:
4080
4081 ``End of a stack dummy. Some systems (e.g. Sony news) give
4082 another signal besides SIGTRAP, so check here as well as
4083 above.''
4084
4085 If someone ever tries to get call dummies on a
4086 non-executable stack to work (where the target would stop
4087 with something like a SIGSEGV), then those tests might need
4088 to be re-instated. Given, however, that the tests were only
4089 enabled when momentary breakpoints were not being used, I
4090 suspect that it won't be the case.
4091
4092 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4093 be necessary for call dummies on a non-executable stack on
4094 SPARC. */
4095
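      /* In short, a SIGTRAP is considered expected (not random) if a
         breakpoint or watchpoint accounts for it, if we were stepping
         over a breakpoint (trap_expected), or if we were
         range-stepping with no step-resume breakpoint pending.  */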
4096 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP)
4097 ecs->random_signal
4098 = !(bpstat_explains_signal (ecs->event_thread->control.stop_bpstat)
4099 || stopped_by_watchpoint
4100 || ecs->event_thread->control.trap_expected
4101 || (ecs->event_thread->control.step_range_end
4102 && (ecs->event_thread->control.step_resume_breakpoint
4103 == NULL)));
4104 else
4105 {
4106 ecs->random_signal = !bpstat_explains_signal
4107 (ecs->event_thread->control.stop_bpstat);
4108 if (!ecs->random_signal)
4109 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_TRAP;
4110 }
4111 }
4112
4113 /* When we reach this point, we've pretty much decided
4114 that the reason for stopping must've been a random
4115 (unexpected) signal. */
4116
4117 else
4118 ecs->random_signal = 1;
4119
4120 process_event_stop_test:
4121
4122 /* Re-fetch current thread's frame in case we did a
4123 "goto process_event_stop_test" above. */
4124 frame = get_current_frame ();
4125 gdbarch = get_frame_arch (frame);
4126
4127 /* For the program's own signals, act according to
4128 the signal handling tables. */
4129
4130 if (ecs->random_signal)
4131 {
4132 /* Signal not for debugging purposes. */
4133 int printed = 0;
4134 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
4135
4136 if (debug_infrun)
4137 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
4138 ecs->event_thread->suspend.stop_signal);
4139
4140 stopped_by_random_signal = 1;
4141
4142 if (signal_print[ecs->event_thread->suspend.stop_signal])
4143 {
4144 printed = 1;
4145 target_terminal_ours_for_output ();
4146 print_signal_received_reason
4147 (ecs->event_thread->suspend.stop_signal);
4148 }
4149 /* Always stop on signals if we're either just gaining control
4150 of the program, or the user explicitly requested this thread
4151 to remain stopped. */
4152 if (stop_soon != NO_STOP_QUIETLY
4153 || ecs->event_thread->stop_requested
4154 || (!inf->detaching
4155 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4156 {
4157 stop_stepping (ecs);
4158 return;
4159 }
4160 /* If not going to stop, give terminal back
4161 if we took it away. */
4162 else if (printed)
4163 target_terminal_inferior ();
4164
4165 /* Clear the signal if it should not be passed. */
4166 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4167 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
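      /* (signal_program[] is what the "handle <signal> pass/nopass"
         command controls; "info signals" shows the current table.)  */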
4168
4169 if (ecs->event_thread->prev_pc == stop_pc
4170 && ecs->event_thread->control.trap_expected
4171 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4172 {
4173 /* We were just starting a new sequence, attempting to
4174 single-step off of a breakpoint and expecting a SIGTRAP.
4175 Instead this signal arrives. This signal will take us out
4176 of the stepping range so GDB needs to remember to, when
4177 the signal handler returns, resume stepping off that
4178 breakpoint. */
4179 /* To simplify things, "continue" is forced to use the same
4180 code paths as single-step - set a breakpoint at the
4181 signal return address and then, once hit, step off that
4182 breakpoint. */
4183 if (debug_infrun)
4184 fprintf_unfiltered (gdb_stdlog,
4185 "infrun: signal arrived while stepping over "
4186 "breakpoint\n");
4187
4188 insert_hp_step_resume_breakpoint_at_frame (frame);
4189 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4190 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4191 ecs->event_thread->control.trap_expected = 0;
4192 keep_going (ecs);
4193 return;
4194 }
4195
4196 if (ecs->event_thread->control.step_range_end != 0
4197 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_0
4198 && (ecs->event_thread->control.step_range_start <= stop_pc
4199 && stop_pc < ecs->event_thread->control.step_range_end)
4200 && frame_id_eq (get_stack_frame_id (frame),
4201 ecs->event_thread->control.step_stack_frame_id)
4202 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4203 {
4204 /* The inferior is about to take a signal that will take it
4205 out of the single step range. Set a breakpoint at the
4206 current PC (which is presumably where the signal handler
4207 will eventually return) and then allow the inferior to
4208 run free.
4209
4210 Note that this is only needed for a signal delivered
4211 while in the single-step range. Nested signals aren't a
4212 problem as they eventually all return. */
4213 if (debug_infrun)
4214 fprintf_unfiltered (gdb_stdlog,
4215 "infrun: signal may take us out of "
4216 "single-step range\n");
4217
4218 insert_hp_step_resume_breakpoint_at_frame (frame);
4219 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4220 ecs->event_thread->control.trap_expected = 0;
4221 keep_going (ecs);
4222 return;
4223 }
4224
4225 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4226 when either there's a nested signal, or when there's a
4227 pending signal enabled just as the signal handler returns
4228 (leaving the inferior at the step-resume-breakpoint without
4229 actually executing it). Either way continue until the
4230 breakpoint is really hit. */
4231 keep_going (ecs);
4232 return;
4233 }
4234
4235 /* Handle cases caused by hitting a breakpoint. */
4236 {
4237 CORE_ADDR jmp_buf_pc;
4238 struct bpstat_what what;
4239
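    /* bpstat_what collapses the list of breakpoints hit at this stop
       into the single most significant action for us to take.  */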
4240 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4241
4242 if (what.call_dummy)
4243 {
4244 stop_stack_dummy = what.call_dummy;
4245 }
4246
4247 /* If we hit an internal event that triggers symbol changes, the
4248 current frame will be invalidated within bpstat_what (e.g., if
4249 we hit an internal solib event). Re-fetch it. */
4250 frame = get_current_frame ();
4251 gdbarch = get_frame_arch (frame);
4252
4253 switch (what.main_action)
4254 {
4255 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4256 /* If we hit the breakpoint at longjmp while stepping, we
4257 install a momentary breakpoint at the target of the
4258 jmp_buf. */
4259
4260 if (debug_infrun)
4261 fprintf_unfiltered (gdb_stdlog,
4262 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4263
4264 ecs->event_thread->stepping_over_breakpoint = 1;
4265
4266 if (what.is_longjmp)
4267 {
4268 if (!gdbarch_get_longjmp_target_p (gdbarch)
4269 || !gdbarch_get_longjmp_target (gdbarch,
4270 frame, &jmp_buf_pc))
4271 {
4272 if (debug_infrun)
4273 fprintf_unfiltered (gdb_stdlog,
4274 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4275 "(!gdbarch_get_longjmp_target)\n");
4276 keep_going (ecs);
4277 return;
4278 }
4279
4280 /* We're going to replace the current step-resume breakpoint
4281 with a longjmp-resume breakpoint. */
4282 delete_step_resume_breakpoint (ecs->event_thread);
4283
4284 /* Insert a breakpoint at resume address. */
4285 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4286 }
4287 else
4288 {
4289 struct symbol *func = get_frame_function (frame);
4290
4291 if (func)
4292 check_exception_resume (ecs, frame, func);
4293 }
4294 keep_going (ecs);
4295 return;
4296
4297 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4298 if (debug_infrun)
4299 fprintf_unfiltered (gdb_stdlog,
4300 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4301
4302 if (what.is_longjmp)
4303 {
4304 gdb_assert (ecs->event_thread->control.step_resume_breakpoint
4305 != NULL);
4306 delete_step_resume_breakpoint (ecs->event_thread);
4307 }
4308 else
4309 {
4310 /* There are several cases to consider.
4311
4312 1. The initiating frame no longer exists. In this case
4313 we must stop, because the exception has gone too far.
4314
4315 2. The initiating frame exists, and is the same as the
4316 current frame. We stop, because the exception has been
4317 caught.
4318
4319 3. The initiating frame exists and is different from
4320 the current frame. This means the exception has been
4321 caught beneath the initiating frame, so keep going. */
4322 struct frame_info *init_frame
4323 = frame_find_by_id (ecs->event_thread->initiating_frame);
4324
4325 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4326 != NULL);
4327 delete_exception_resume_breakpoint (ecs->event_thread);
4328
4329 if (init_frame)
4330 {
4331 struct frame_id current_id
4332 = get_frame_id (get_current_frame ());
4333 if (frame_id_eq (current_id,
4334 ecs->event_thread->initiating_frame))
4335 {
4336 /* Case 2. Fall through. */
4337 }
4338 else
4339 {
4340 /* Case 3. */
4341 keep_going (ecs);
4342 return;
4343 }
4344 }
4345
4346 /* For Cases 1 and 2, remove the step-resume breakpoint,
4347 if it exists. */
4348 delete_step_resume_breakpoint (ecs->event_thread);
4349 }
4350
4351 ecs->event_thread->control.stop_step = 1;
4352 print_end_stepping_range_reason ();
4353 stop_stepping (ecs);
4354 return;
4355
4356 case BPSTAT_WHAT_SINGLE:
4357 if (debug_infrun)
4358 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4359 ecs->event_thread->stepping_over_breakpoint = 1;
4360 /* Still need to check other stuff, at least the case
4361 where we are stepping and step out of the right range. */
4362 break;
4363
4364 case BPSTAT_WHAT_STEP_RESUME:
4365 if (debug_infrun)
4366 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4367
4368 delete_step_resume_breakpoint (ecs->event_thread);
4369 if (ecs->event_thread->control.proceed_to_finish
4370 && execution_direction == EXEC_REVERSE)
4371 {
4372 struct thread_info *tp = ecs->event_thread;
4373
4374 /* We are finishing a function in reverse, and just hit
4375 the step-resume breakpoint at the start address of the
4376 function, and we're almost there -- just need to back
4377 up by one more single-step, which should take us back
4378 to the function call. */
4379 tp->control.step_range_start = tp->control.step_range_end = 1;
4380 keep_going (ecs);
4381 return;
4382 }
4383 if (stop_pc == ecs->stop_func_start
4384 && execution_direction == EXEC_REVERSE)
4385 {
4386 /* We are stepping over a function call in reverse, and
4387 just hit the step-resume breakpoint at the start
4388 address of the function. Go back to single-stepping,
4389 which should take us back to the function call. */
4390 ecs->event_thread->stepping_over_breakpoint = 1;
4391 keep_going (ecs);
4392 return;
4393 }
4394 break;
4395
4396 case BPSTAT_WHAT_STOP_NOISY:
4397 if (debug_infrun)
4398 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4399 stop_print_frame = 1;
4400
4401 /* We are about to nuke the step_resume_breakpoint via the
4402 cleanup chain, so no need to worry about it here. */
4403
4404 stop_stepping (ecs);
4405 return;
4406
4407 case BPSTAT_WHAT_STOP_SILENT:
4408 if (debug_infrun)
4409 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4410 stop_print_frame = 0;
4411
4412 /* We are about to nuke the step_resume_breakpoint via the
4413 cleanup chain, so no need to worry about it here. */
4414
4415 stop_stepping (ecs);
4416 return;
4417
4418 case BPSTAT_WHAT_HP_STEP_RESUME:
4419 if (debug_infrun)
4420 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4421
4422 delete_step_resume_breakpoint (ecs->event_thread);
4423 if (ecs->event_thread->step_after_step_resume_breakpoint)
4424 {
4425 /* Back when the step-resume breakpoint was inserted, we
4426 were trying to single-step off a breakpoint. Go back
4427 to doing that. */
4428 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4429 ecs->event_thread->stepping_over_breakpoint = 1;
4430 keep_going (ecs);
4431 return;
4432 }
4433 break;
4434
4435 case BPSTAT_WHAT_KEEP_CHECKING:
4436 break;
4437 }
4438 }
4439
4440 /* We come here if we hit a breakpoint but should not
4441 stop for it. Possibly we also were stepping
4442 and should stop for that. So fall through and
4443 test for stepping. But, if not stepping,
4444 do not stop. */
4445
4446 /* In all-stop mode, if we're currently stepping but have stopped in
4447 some other thread, we need to switch back to the stepped thread. */
4448 if (!non_stop)
4449 {
4450 struct thread_info *tp;
4451
4452 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4453 ecs->event_thread);
4454 if (tp)
4455 {
4456 /* However, if the current thread is blocked on some internal
4457 breakpoint, and we simply need to step over that breakpoint
4458 to get it going again, do that first. */
4459 if ((ecs->event_thread->control.trap_expected
4460 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
4461 || ecs->event_thread->stepping_over_breakpoint)
4462 {
4463 keep_going (ecs);
4464 return;
4465 }
4466
4467 /* If the stepping thread exited, then don't try to switch
4468 back and resume it, which could fail in several different
4469 ways depending on the target. Instead, just keep going.
4470
4471 We can find a stepping dead thread in the thread list in
4472 two cases:
4473
4474 - The target supports thread exit events, and when the
4475 target tries to delete the thread from the thread list,
4476 inferior_ptid pointed at the exiting thread. In such
4477 case, calling delete_thread does not really remove the
4478 thread from the list; instead, the thread is left listed,
4479 with 'exited' state.
4480
4481 - The target's debug interface does not support thread
4482 exit events, and so we have no idea whatsoever if the
4483 previously stepping thread is still alive. For that
4484 reason, we need to synchronously query the target
4485 now. */
4486 if (is_exited (tp->ptid)
4487 || !target_thread_alive (tp->ptid))
4488 {
4489 if (debug_infrun)
4490 fprintf_unfiltered (gdb_stdlog,
4491 "infrun: not switching back to "
4492 "stepped thread, it has vanished\n");
4493
4494 delete_thread (tp->ptid);
4495 keep_going (ecs);
4496 return;
4497 }
4498
4499 /* Otherwise, we no longer expect a trap in the current thread.
4500 Clear the trap_expected flag before switching back -- this is
4501 what keep_going would do as well, if we called it. */
4502 ecs->event_thread->control.trap_expected = 0;
4503
4504 if (debug_infrun)
4505 fprintf_unfiltered (gdb_stdlog,
4506 "infrun: switching back to stepped thread\n");
4507
4508 ecs->event_thread = tp;
4509 ecs->ptid = tp->ptid;
4510 context_switch (ecs->ptid);
4511 keep_going (ecs);
4512 return;
4513 }
4514 }
4515
4516 /* Are we stepping to get the inferior out of the dynamic linker's
4517 hook (and possibly the dld itself) after catching a shlib
4518 event? */
4519 if (ecs->event_thread->stepping_through_solib_after_catch)
4520 {
4521 #if defined(SOLIB_ADD)
4522 /* Have we reached our destination? If not, keep going. */
4523 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4524 {
4525 if (debug_infrun)
4526 fprintf_unfiltered (gdb_stdlog,
4527 "infrun: stepping in dynamic linker\n");
4528 ecs->event_thread->stepping_over_breakpoint = 1;
4529 keep_going (ecs);
4530 return;
4531 }
4532 #endif
4533 if (debug_infrun)
4534 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4535 /* Else, stop and report the catchpoint(s) whose triggering
4536 caused us to begin stepping. */
4537 ecs->event_thread->stepping_through_solib_after_catch = 0;
4538 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4539 ecs->event_thread->control.stop_bpstat
4540 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4541 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4542 stop_print_frame = 1;
4543 stop_stepping (ecs);
4544 return;
4545 }
4546
4547 if (ecs->event_thread->control.step_resume_breakpoint)
4548 {
4549 if (debug_infrun)
4550 fprintf_unfiltered (gdb_stdlog,
4551 "infrun: step-resume breakpoint is inserted\n");
4552
4553 /* Having a step-resume breakpoint overrides anything
4554 else having to do with stepping commands until
4555 that breakpoint is reached. */
4556 keep_going (ecs);
4557 return;
4558 }
4559
4560 if (ecs->event_thread->control.step_range_end == 0)
4561 {
4562 if (debug_infrun)
4563 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4564 /* Likewise if we aren't even stepping. */
4565 keep_going (ecs);
4566 return;
4567 }
4568
4569 /* Re-fetch current thread's frame in case the code above caused
4570 the frame cache to be re-initialized, making our FRAME variable
4571 a dangling pointer. */
4572 frame = get_current_frame ();
4573 gdbarch = get_frame_arch (frame);
4574
4575 /* If stepping through a line, keep going if still within it.
4576
4577 Note that step_range_end is the address of the first instruction
4578 beyond the step range, and NOT the address of the last instruction
4579 within it!
4580
4581 Note also that during reverse execution, we may be stepping
4582 through a function epilogue and therefore must detect when
4583 the current-frame changes in the middle of a line. */
4584
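  /* For example, if a line were compiled to the half-open range
     [0x1000, 0x1010), step_range_start would be 0x1000 and
     step_range_end 0x1010, and a stop_pc of exactly 0x1010 would
     already be outside the line.  (Addresses purely illustrative.)  */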
4585 if (stop_pc >= ecs->event_thread->control.step_range_start
4586 && stop_pc < ecs->event_thread->control.step_range_end
4587 && (execution_direction != EXEC_REVERSE
4588 || frame_id_eq (get_frame_id (frame),
4589 ecs->event_thread->control.step_frame_id)))
4590 {
4591 if (debug_infrun)
4592 fprintf_unfiltered
4593 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4594 paddress (gdbarch, ecs->event_thread->control.step_range_start),
4595 paddress (gdbarch, ecs->event_thread->control.step_range_end));
4596
4597 /* When stepping backward, stop at beginning of line range
4598 (unless it's the function entry point, in which case
4599 keep going back to the call point). */
4600 if (stop_pc == ecs->event_thread->control.step_range_start
4601 && stop_pc != ecs->stop_func_start
4602 && execution_direction == EXEC_REVERSE)
4603 {
4604 ecs->event_thread->control.stop_step = 1;
4605 print_end_stepping_range_reason ();
4606 stop_stepping (ecs);
4607 }
4608 else
4609 keep_going (ecs);
4610
4611 return;
4612 }
4613
4614 /* We stepped out of the stepping range. */
4615
4616 /* If we are stepping at the source level and entered the runtime
4617 loader dynamic symbol resolution code...
4618
4619 EXEC_FORWARD: we keep on single stepping until we exit the run
4620 time loader code and reach the callee's address.
4621
4622 EXEC_REVERSE: we've already executed the callee (backward), and
4623 the runtime loader code is handled just like any other
4624 undebuggable function call. Now we need only keep stepping
4625 backward through the trampoline code, and that's handled further
4626 down, so there is nothing for us to do here. */
4627
4628 if (execution_direction != EXEC_REVERSE
4629 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4630 && in_solib_dynsym_resolve_code (stop_pc))
4631 {
4632 CORE_ADDR pc_after_resolver =
4633 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4634
4635 if (debug_infrun)
4636 fprintf_unfiltered (gdb_stdlog,
4637 "infrun: stepped into dynsym resolve code\n");
4638
4639 if (pc_after_resolver)
4640 {
4641 /* Set up a step-resume breakpoint at the address
4642 indicated by SKIP_SOLIB_RESOLVER. */
4643 struct symtab_and_line sr_sal;
4644
4645 init_sal (&sr_sal);
4646 sr_sal.pc = pc_after_resolver;
4647 sr_sal.pspace = get_frame_program_space (frame);
4648
4649 insert_step_resume_breakpoint_at_sal (gdbarch,
4650 sr_sal, null_frame_id);
4651 }
4652
4653 keep_going (ecs);
4654 return;
4655 }
4656
4657 if (ecs->event_thread->control.step_range_end != 1
4658 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4659 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4660 && get_frame_type (frame) == SIGTRAMP_FRAME)
4661 {
4662 if (debug_infrun)
4663 fprintf_unfiltered (gdb_stdlog,
4664 "infrun: stepped into signal trampoline\n");
4665 /* The inferior, while doing a "step" or "next", has ended up in
4666 a signal trampoline (either by a signal being delivered or by
4667 the signal handler returning). Just single-step until the
4668 inferior leaves the trampoline (either by calling the handler
4669 or returning). */
4670 keep_going (ecs);
4671 return;
4672 }
4673
4674 /* Check for subroutine calls. The check for the current frame
4675 equalling the step ID is not necessary - the check of the
4676 previous frame's ID is sufficient - but it is a common case and
4677 cheaper than checking the previous frame's ID.
4678
4679 NOTE: frame_id_eq will never report two invalid frame IDs as
4680 being equal, so to get into this block, both the current and
4681 previous frame must have valid frame IDs. */
4682 /* The outer_frame_id check is a heuristic to detect stepping
4683 through startup code. If we step over an instruction which
4684 sets the stack pointer from an invalid value to a valid value,
4685 we may detect that as a subroutine call from the mythical
4686 "outermost" function. This could be fixed by marking
4687 outermost frames as !stack_p,code_p,special_p. Then the
4688 initial outermost frame, before sp was valid, would
4689 have code_addr == &_start. See the comment in frame_id_eq
4690 for more. */
4691 if (!frame_id_eq (get_stack_frame_id (frame),
4692 ecs->event_thread->control.step_stack_frame_id)
4693 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4694 ecs->event_thread->control.step_stack_frame_id)
4695 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
4696 outer_frame_id)
4697 || step_start_function != find_pc_function (stop_pc))))
4698 {
4699 CORE_ADDR real_stop_pc;
4700
4701 if (debug_infrun)
4702 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4703
4704 if ((ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
4705 || ((ecs->event_thread->control.step_range_end == 1)
4706 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4707 ecs->stop_func_start)))
4708 {
4709 /* I presume that step_over_calls is only 0 when we're
4710 supposed to be stepping at the assembly language level
4711 ("stepi"). Just stop. */
4712 /* Also, maybe we just did a "nexti" inside a prologue, so we
4713 thought it was a subroutine call but it was not. Stop as
4714 well. FENN */
4715 /* And this works the same backward as frontward. MVS */
4716 ecs->event_thread->control.stop_step = 1;
4717 print_end_stepping_range_reason ();
4718 stop_stepping (ecs);
4719 return;
4720 }
4721
4722 /* Reverse stepping through solib trampolines. */
4723
4724 if (execution_direction == EXEC_REVERSE
4725 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
4726 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4727 || (ecs->stop_func_start == 0
4728 && in_solib_dynsym_resolve_code (stop_pc))))
4729 {
4730 /* Any solib trampoline code can be handled in reverse
4731 by simply continuing to single-step. We have already
4732 executed the solib function (backwards), and a few
4733 steps will take us back through the trampoline to the
4734 caller. */
4735 keep_going (ecs);
4736 return;
4737 }
4738
4739 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
4740 {
4741 /* We're doing a "next".
4742
4743 Normal (forward) execution: set a breakpoint at the
4744 callee's return address (the address at which the caller
4745 will resume).
4746
4747 Reverse (backward) execution: set the step-resume
4748 breakpoint at the start of the function that we just
4749 stepped into (backwards), and continue to there. When we
4750 get there, we'll need to single-step back to the caller. */
4751
4752 if (execution_direction == EXEC_REVERSE)
4753 {
4754 struct symtab_and_line sr_sal;
4755
4756 /* Normal function call return (static or dynamic). */
4757 init_sal (&sr_sal);
4758 sr_sal.pc = ecs->stop_func_start;
4759 sr_sal.pspace = get_frame_program_space (frame);
4760 insert_step_resume_breakpoint_at_sal (gdbarch,
4761 sr_sal, null_frame_id);
4762 }
4763 else
4764 insert_step_resume_breakpoint_at_caller (frame);
4765
4766 keep_going (ecs);
4767 return;
4768 }
4769
4770 /* If we are in a function call trampoline (a stub between the
4771 calling routine and the real function), locate the real
4772 function. That's what tells us (a) whether we want to step
4773 into it at all, and (b) what prologue we want to run to the
4774 end of, if we do step into it. */
4775 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4776 if (real_stop_pc == 0)
4777 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4778 if (real_stop_pc != 0)
4779 ecs->stop_func_start = real_stop_pc;
4780
4781 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4782 {
4783 struct symtab_and_line sr_sal;
4784
4785 init_sal (&sr_sal);
4786 sr_sal.pc = ecs->stop_func_start;
4787 sr_sal.pspace = get_frame_program_space (frame);
4788
4789 insert_step_resume_breakpoint_at_sal (gdbarch,
4790 sr_sal, null_frame_id);
4791 keep_going (ecs);
4792 return;
4793 }
4794
4795 /* If we have line number information for the function we are
4796 thinking of stepping into, step into it.
4797
4798 If there are several symtabs at that PC (e.g. with include
4799 files), we just want to know whether *any* of them have line
4800 numbers. find_pc_line handles this. */
4801 {
4802 struct symtab_and_line tmp_sal;
4803
4804 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4805 if (tmp_sal.line != 0)
4806 {
4807 if (execution_direction == EXEC_REVERSE)
4808 handle_step_into_function_backward (gdbarch, ecs);
4809 else
4810 handle_step_into_function (gdbarch, ecs);
4811 return;
4812 }
4813 }
4814
4815 /* If we have no line number and the step-stop-if-no-debug is
4816 set, we stop the step so that the user has a chance to switch
4817 to assembly (stepi) mode. */
4818 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4819 && step_stop_if_no_debug)
4820 {
4821 ecs->event_thread->control.stop_step = 1;
4822 print_end_stepping_range_reason ();
4823 stop_stepping (ecs);
4824 return;
4825 }
4826
4827 if (execution_direction == EXEC_REVERSE)
4828 {
4829 /* Set a breakpoint at callee's start address.
4830 From there we can step once and be back in the caller. */
4831 struct symtab_and_line sr_sal;
4832
4833 init_sal (&sr_sal);
4834 sr_sal.pc = ecs->stop_func_start;
4835 sr_sal.pspace = get_frame_program_space (frame);
4836 insert_step_resume_breakpoint_at_sal (gdbarch,
4837 sr_sal, null_frame_id);
4838 }
4839 else
4840 /* Set a breakpoint at callee's return address (the address
4841 at which the caller will resume). */
4842 insert_step_resume_breakpoint_at_caller (frame);
4843
4844 keep_going (ecs);
4845 return;
4846 }
4847
4848 /* Reverse stepping through solib trampolines. */
4849
4850 if (execution_direction == EXEC_REVERSE
4851 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
4852 {
4853 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4854 || (ecs->stop_func_start == 0
4855 && in_solib_dynsym_resolve_code (stop_pc)))
4856 {
4857 /* Any solib trampoline code can be handled in reverse
4858 by simply continuing to single-step. We have already
4859 executed the solib function (backwards), and a few
4860 steps will take us back through the trampoline to the
4861 caller. */
4862 keep_going (ecs);
4863 return;
4864 }
4865 else if (in_solib_dynsym_resolve_code (stop_pc))
4866 {
4867 /* Stepped backward into the solib dynsym resolver.
4868 Set a breakpoint at its start and continue, then
4869 one more step will take us out. */
4870 struct symtab_and_line sr_sal;
4871
4872 init_sal (&sr_sal);
4873 sr_sal.pc = ecs->stop_func_start;
4874 sr_sal.pspace = get_frame_program_space (frame);
4875 insert_step_resume_breakpoint_at_sal (gdbarch,
4876 sr_sal, null_frame_id);
4877 keep_going (ecs);
4878 return;
4879 }
4880 }
4881
4882 /* If we're in the return path from a shared library trampoline,
4883 we want to proceed through the trampoline when stepping. */
4884 if (gdbarch_in_solib_return_trampoline (gdbarch,
4885 stop_pc, ecs->stop_func_name))
4886 {
4887 /* Determine where this trampoline returns. */
4888 CORE_ADDR real_stop_pc;
4889
4890 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4891
4892 if (debug_infrun)
4893 fprintf_unfiltered (gdb_stdlog,
4894 "infrun: stepped into solib return tramp\n");
4895
4896 /* Only proceed through if we know where it's going. */
4897 if (real_stop_pc)
4898 {
4899 /* And put the step-breakpoint there and go until there. */
4900 struct symtab_and_line sr_sal;
4901
4902 init_sal (&sr_sal); /* initialize to zeroes */
4903 sr_sal.pc = real_stop_pc;
4904 sr_sal.section = find_pc_overlay (sr_sal.pc);
4905 sr_sal.pspace = get_frame_program_space (frame);
4906
4907 /* Do not specify what the fp should be when we stop since
4908 on some machines the prologue is where the new fp value
4909 is established. */
4910 insert_step_resume_breakpoint_at_sal (gdbarch,
4911 sr_sal, null_frame_id);
4912
4913 /* Restart without fiddling with the step ranges or
4914 other state. */
4915 keep_going (ecs);
4916 return;
4917 }
4918 }
4919
4920 stop_pc_sal = find_pc_line (stop_pc, 0);
4921
4922 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4923 the trampoline processing logic; however, there are some trampolines
4924 that have no names, so we should do trampoline handling first. */
4925 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
4926 && ecs->stop_func_name == NULL
4927 && stop_pc_sal.line == 0)
4928 {
4929 if (debug_infrun)
4930 fprintf_unfiltered (gdb_stdlog,
4931 "infrun: stepped into undebuggable function\n");
4932
4933 /* The inferior just stepped into, or returned to, an
4934 undebuggable function (where there is no debugging information
4935 and no line number corresponding to the address where the
4936 inferior stopped). Since we want to skip this kind of code,
4937 we keep going until the inferior returns from this
4938 function - unless the user has asked us not to (via
4939 set step-mode) or we no longer know how to get back
4940 to the call site. */
4941 if (step_stop_if_no_debug
4942 || !frame_id_p (frame_unwind_caller_id (frame)))
4943 {
4944 /* If we have no line number and the step-stop-if-no-debug
4945 is set, we stop the step so that the user has a chance to
4946 switch to assembly mode. */
4947 ecs->event_thread->control.stop_step = 1;
4948 print_end_stepping_range_reason ();
4949 stop_stepping (ecs);
4950 return;
4951 }
4952 else
4953 {
4954 /* Set a breakpoint at callee's return address (the address
4955 at which the caller will resume). */
4956 insert_step_resume_breakpoint_at_caller (frame);
4957 keep_going (ecs);
4958 return;
4959 }
4960 }
4961
4962 if (ecs->event_thread->control.step_range_end == 1)
4963 {
4964 /* It is stepi or nexti. We always want to stop stepping after
4965 one instruction. */
4966 if (debug_infrun)
4967 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4968 ecs->event_thread->control.stop_step = 1;
4969 print_end_stepping_range_reason ();
4970 stop_stepping (ecs);
4971 return;
4972 }
4973
4974 if (stop_pc_sal.line == 0)
4975 {
4976 /* We have no line number information. That means to stop
4977 stepping (does this always happen right after one instruction,
4978 when we do "s" in a function with no line numbers,
4979 or can this happen as a result of a return or longjmp?). */
4980 if (debug_infrun)
4981 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4982 ecs->event_thread->control.stop_step = 1;
4983 print_end_stepping_range_reason ();
4984 stop_stepping (ecs);
4985 return;
4986 }
4987
4988 /* Look for "calls" to inlined functions, part one. If the inline
4989 frame machinery detected some skipped call sites, we have entered
4990 a new inline function. */
4991
4992 if (frame_id_eq (get_frame_id (get_current_frame ()),
4993 ecs->event_thread->control.step_frame_id)
4994 && inline_skipped_frames (ecs->ptid))
4995 {
4996 struct symtab_and_line call_sal;
4997
4998 if (debug_infrun)
4999 fprintf_unfiltered (gdb_stdlog,
5000 "infrun: stepped into inlined function\n");
5001
5002 find_frame_sal (get_current_frame (), &call_sal);
5003
5004 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5005 {
5006 /* For "step", we're going to stop. But if the call site
5007 for this inlined function is on the same source line as
5008 we were previously stepping, go down into the function
5009 first. Otherwise stop at the call site. */
5010
5011 if (call_sal.line == ecs->event_thread->current_line
5012 && call_sal.symtab == ecs->event_thread->current_symtab)
5013 step_into_inline_frame (ecs->ptid);
5014
5015 ecs->event_thread->control.stop_step = 1;
5016 print_end_stepping_range_reason ();
5017 stop_stepping (ecs);
5018 return;
5019 }
5020 else
5021 {
5022 /* For "next", we should stop at the call site if it is on a
5023 different source line. Otherwise continue through the
5024 inlined function. */
5025 if (call_sal.line == ecs->event_thread->current_line
5026 && call_sal.symtab == ecs->event_thread->current_symtab)
5027 keep_going (ecs);
5028 else
5029 {
5030 ecs->event_thread->control.stop_step = 1;
5031 print_end_stepping_range_reason ();
5032 stop_stepping (ecs);
5033 }
5034 return;
5035 }
5036 }
5037
5038 /* Look for "calls" to inlined functions, part two. If we are still
5039 in the same real function we were stepping through, but we have
5040 to go further up to find the exact frame ID, we are stepping
5041 through a more inlined call beyond its call site. */
5042
5043 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5044 && !frame_id_eq (get_frame_id (get_current_frame ()),
5045 ecs->event_thread->control.step_frame_id)
5046 && stepped_in_from (get_current_frame (),
5047 ecs->event_thread->control.step_frame_id))
5048 {
5049 if (debug_infrun)
5050 fprintf_unfiltered (gdb_stdlog,
5051 "infrun: stepping through inlined function\n");
5052
5053 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5054 keep_going (ecs);
5055 else
5056 {
5057 ecs->event_thread->control.stop_step = 1;
5058 print_end_stepping_range_reason ();
5059 stop_stepping (ecs);
5060 }
5061 return;
5062 }
5063
5064 if ((stop_pc == stop_pc_sal.pc)
5065 && (ecs->event_thread->current_line != stop_pc_sal.line
5066 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5067 {
5068 /* We are at the start of a different line. So stop. Note that
5069 we don't stop if we step into the middle of a different line.
5070 That is said to make things like for (;;) statements work
5071 better. */
5072 if (debug_infrun)
5073 fprintf_unfiltered (gdb_stdlog,
5074 "infrun: stepped to a different line\n");
5075 ecs->event_thread->control.stop_step = 1;
5076 print_end_stepping_range_reason ();
5077 stop_stepping (ecs);
5078 return;
5079 }
5080
5081 /* We aren't done stepping.
5082
5083 Optimize by setting the stepping range to the line.
5084 (We might not be in the original line, but if we entered a
5085 new line in mid-statement, we continue stepping. This makes
5086 things like for(;;) statements work better.) */
5087
5088 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5089 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5090 set_step_info (frame, stop_pc_sal);
5091
5092 if (debug_infrun)
5093 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5094 keep_going (ecs);
5095 }
5096
5097 /* Is thread TP in the middle of single-stepping? */
5098
5099 static int
5100 currently_stepping (struct thread_info *tp)
5101 {
5102 return ((tp->control.step_range_end
5103 && tp->control.step_resume_breakpoint == NULL)
5104 || tp->control.trap_expected
5105 || tp->stepping_through_solib_after_catch
5106 || bpstat_should_step ());
5107 }
5108
5109 /* Returns true if any thread *but* the one passed in "data" is in the
5110 middle of stepping or of handling a "next". */
5111
5112 static int
5113 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
5114 {
5115 if (tp == data)
5116 return 0;
5117
5118 return (tp->control.step_range_end
5119 || tp->control.trap_expected
5120 || tp->stepping_through_solib_after_catch);
5121 }
5122
5123 /* Inferior has stepped into a subroutine call with source code that
5124 we should not step over. Step to the first line of code in
5125 it. */
5126
5127 static void
5128 handle_step_into_function (struct gdbarch *gdbarch,
5129 struct execution_control_state *ecs)
5130 {
5131 struct symtab *s;
5132 struct symtab_and_line stop_func_sal, sr_sal;
5133
5134 s = find_pc_symtab (stop_pc);
5135 if (s && s->language != language_asm)
5136 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5137 ecs->stop_func_start);
5138
5139 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
5140 /* Use the step_resume_break to step until the end of the prologue,
5141 even if that involves jumps (as it seems to on the vax under
5142 4.2). */
5143 /* If the prologue ends in the middle of a source line, continue to
5144 the end of that source line (if it is still within the function).
5145 Otherwise, just go to end of prologue. */
5146 if (stop_func_sal.end
5147 && stop_func_sal.pc != ecs->stop_func_start
5148 && stop_func_sal.end < ecs->stop_func_end)
5149 ecs->stop_func_start = stop_func_sal.end;
5150
5151 /* Architectures which require breakpoint adjustment might not be able
5152 to place a breakpoint at the computed address. If so, the test
5153 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
5154 ecs->stop_func_start to an address at which a breakpoint may be
5155 legitimately placed.
5156
5157 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
5158 made, GDB will enter an infinite loop when stepping through
5159 optimized code consisting of VLIW instructions which contain
5160 subinstructions corresponding to different source lines. On
5161 FR-V, it's not permitted to place a breakpoint on any but the
5162 first subinstruction of a VLIW instruction. When a breakpoint is
5163 set, GDB will adjust the breakpoint address to the beginning of
5164 the VLIW instruction. Thus, we need to make the corresponding
5165 adjustment here when computing the stop address. */
5166
5167 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
5168 {
5169 ecs->stop_func_start
5170 = gdbarch_adjust_breakpoint_address (gdbarch,
5171 ecs->stop_func_start);
5172 }
5173
5174 if (ecs->stop_func_start == stop_pc)
5175 {
5176 /* We are already there: stop now. */
5177 ecs->event_thread->control.stop_step = 1;
5178 print_end_stepping_range_reason ();
5179 stop_stepping (ecs);
5180 return;
5181 }
5182 else
5183 {
5184 /* Put the step-breakpoint there and go until there. */
5185 init_sal (&sr_sal); /* initialize to zeroes */
5186 sr_sal.pc = ecs->stop_func_start;
5187 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
5188 sr_sal.pspace = get_frame_program_space (get_current_frame ());
5189
5190 /* Do not specify what the fp should be when we stop since on
5191 some machines the prologue is where the new fp value is
5192 established. */
5193 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
5194
5195 /* And make sure stepping stops right away then. */
5196 ecs->event_thread->control.step_range_end
5197 = ecs->event_thread->control.step_range_start;
5198 }
5199 keep_going (ecs);
5200 }
5201
5202 /* Inferior has stepped backward into a subroutine call with source
5203 code that we should not step over. Step to the beginning of the
5204 last line of code in it. */
5205
5206 static void
5207 handle_step_into_function_backward (struct gdbarch *gdbarch,
5208 struct execution_control_state *ecs)
5209 {
5210 struct symtab *s;
5211 struct symtab_and_line stop_func_sal;
5212
5213 s = find_pc_symtab (stop_pc);
5214 if (s && s->language != language_asm)
5215 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5216 ecs->stop_func_start);
5217
5218 stop_func_sal = find_pc_line (stop_pc, 0);
5219
5220 /* OK, we're just going to keep stepping here. */
5221 if (stop_func_sal.pc == stop_pc)
5222 {
5223 /* We're there already. Just stop stepping now. */
5224 ecs->event_thread->control.stop_step = 1;
5225 print_end_stepping_range_reason ();
5226 stop_stepping (ecs);
5227 }
5228 else
5229 {
5230 /* Else just reset the step range and keep going.
5231 No step-resume breakpoint, they don't work for
5232 epilogues, which can have multiple entry paths. */
5233 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5234 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5235 keep_going (ecs);
5236 }
5237 return;
5238 }
5239
5240 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
5241 This is used both to skip over a function after stepping into it and to skip over stretches of code, such as trampolines. */
5242
5243 static void
5244 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5245 struct symtab_and_line sr_sal,
5246 struct frame_id sr_id,
5247 enum bptype sr_type)
5248 {
5249 /* There should never be more than one step-resume or longjmp-resume
5250 breakpoint per thread, so we should never be setting a new
5251 step_resume_breakpoint when one is already active. */
5252 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5253 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5254
5255 if (debug_infrun)
5256 fprintf_unfiltered (gdb_stdlog,
5257 "infrun: inserting step-resume breakpoint at %s\n",
5258 paddress (gdbarch, sr_sal.pc));
5259
5260 inferior_thread ()->control.step_resume_breakpoint
5261 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5262 }
5263
5264 void
5265 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5266 struct symtab_and_line sr_sal,
5267 struct frame_id sr_id)
5268 {
5269 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5270 sr_sal, sr_id,
5271 bp_step_resume);
5272 }
5273
5274 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5275 This is used to skip a potential signal handler.
5276
5277 This is called with the interrupted function's frame. The signal
5278 handler, when it returns, will resume the interrupted function at
5279 RETURN_FRAME.pc. */
5280
5281 static void
5282 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5283 {
5284 struct symtab_and_line sr_sal;
5285 struct gdbarch *gdbarch;
5286
5287 gdb_assert (return_frame != NULL);
5288 init_sal (&sr_sal); /* initialize to zeros */
5289
5290 gdbarch = get_frame_arch (return_frame);
5291 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5292 sr_sal.section = find_pc_overlay (sr_sal.pc);
5293 sr_sal.pspace = get_frame_program_space (return_frame);
5294
5295 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
5296 get_stack_frame_id (return_frame),
5297 bp_hp_step_resume);
5298 }
5299
5300 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
5301 is used to skip a function after stepping into it (for "next" or if
5302 the called function has no debugging information).
5303
5304 The current function has almost always been reached by single
5305 stepping a call or return instruction. NEXT_FRAME belongs to the
5306 current function, and the breakpoint will be set at the caller's
5307 resume address.
5308
5309 This is a separate function rather than reusing
5310 insert_hp_step_resume_breakpoint_at_frame in order to avoid
5311 get_prev_frame, which may stop prematurely (see the implementation
5312 of frame_unwind_caller_id for an example). */
5313
5314 static void
5315 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5316 {
5317 struct symtab_and_line sr_sal;
5318 struct gdbarch *gdbarch;
5319
5320 /* We shouldn't have gotten here if we don't know where the call site
5321 is. */
5322 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5323
5324 init_sal (&sr_sal); /* initialize to zeros */
5325
5326 gdbarch = frame_unwind_caller_arch (next_frame);
5327 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5328 frame_unwind_caller_pc (next_frame));
5329 sr_sal.section = find_pc_overlay (sr_sal.pc);
5330 sr_sal.pspace = frame_unwind_program_space (next_frame);
5331
5332 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5333 frame_unwind_caller_id (next_frame));
5334 }
5335
5336 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5337 new breakpoint at the target of a jmp_buf. The handling of
5338 longjmp-resume uses the same mechanisms used for handling
5339 "step-resume" breakpoints. */
5340
5341 static void
5342 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5343 {
5344 /* There should never be more than one step-resume or longjmp-resume
5345 breakpoint per thread, so we should never be setting a new
5346 longjmp_resume_breakpoint when one is already active. */
5347 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5348
5349 if (debug_infrun)
5350 fprintf_unfiltered (gdb_stdlog,
5351 "infrun: inserting longjmp-resume breakpoint at %s\n",
5352 paddress (gdbarch, pc));
5353
5354 inferior_thread ()->control.step_resume_breakpoint =
5355 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5356 }
5357
5358 /* Insert an exception resume breakpoint. TP is the thread throwing
5359 the exception. The block B is the block of the unwinder debug hook
5360 function. FRAME is the frame corresponding to the call to this
5361 function. SYM is the symbol of the function argument holding the
5362 target PC of the exception. */
5363
5364 static void
5365 insert_exception_resume_breakpoint (struct thread_info *tp,
5366 struct block *b,
5367 struct frame_info *frame,
5368 struct symbol *sym)
5369 {
5370 struct gdb_exception e;
5371
5372 /* We want to ignore errors here. */
5373 TRY_CATCH (e, RETURN_MASK_ERROR)
5374 {
5375 struct symbol *vsym;
5376 struct value *value;
5377 CORE_ADDR handler;
5378 struct breakpoint *bp;
5379
5380 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
5381 value = read_var_value (vsym, frame);
5382 /* If the value was optimized out, revert to the old behavior. */
5383 if (! value_optimized_out (value))
5384 {
5385 handler = value_as_address (value);
5386
5387 if (debug_infrun)
5388 fprintf_unfiltered (gdb_stdlog,
5389 "infrun: exception resume at %lx\n",
5390 (unsigned long) handler);
5391
5392 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
5393 handler, bp_exception_resume);
5394 bp->thread = tp->num;
5395 inferior_thread ()->control.exception_resume_breakpoint = bp;
5396 }
5397 }
5398 }
5399
5400 /* This is called when an exception has been intercepted. Check to
5401 see whether the exception's destination is of interest, and if so,
5402 set an exception resume breakpoint there. */
5403
5404 static void
5405 check_exception_resume (struct execution_control_state *ecs,
5406 struct frame_info *frame, struct symbol *func)
5407 {
5408 struct gdb_exception e;
5409
5410 TRY_CATCH (e, RETURN_MASK_ERROR)
5411 {
5412 struct block *b;
5413 struct dict_iterator iter;
5414 struct symbol *sym;
5415 int argno = 0;
5416
5417 /* The exception breakpoint is a thread-specific breakpoint on
5418 the unwinder's debug hook, declared as:
5419
5420 void _Unwind_DebugHook (void *cfa, void *handler);
5421
5422 The CFA argument indicates the frame to which control is
5423 about to be transferred. HANDLER is the destination PC.
5424
5425 We ignore the CFA and set a temporary breakpoint at HANDLER.
5426 This is not extremely efficient but it avoids issues in gdb
5427 with computing the DWARF CFA, and it also works even in weird
5428 cases such as throwing an exception from inside a signal
5429 handler. */
5430
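/* Walk the debug hook's formal arguments; the first is the CFA (which
we ignore) and the second is HANDLER, which is what we hand to
insert_exception_resume_breakpoint below. */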
5431 b = SYMBOL_BLOCK_VALUE (func);
5432 ALL_BLOCK_SYMBOLS (b, iter, sym)
5433 {
5434 if (!SYMBOL_IS_ARGUMENT (sym))
5435 continue;
5436
5437 if (argno == 0)
5438 ++argno;
5439 else
5440 {
5441 insert_exception_resume_breakpoint (ecs->event_thread,
5442 b, frame, sym);
5443 break;
5444 }
5445 }
5446 }
5447 }
5448
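/* Tell the callers of handle_inferior_event that we no longer want to
wait for the inferior (see the wait_some_more flag below). */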
5449 static void
5450 stop_stepping (struct execution_control_state *ecs)
5451 {
5452 if (debug_infrun)
5453 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5454
5455 /* Let callers know we don't want to wait for the inferior anymore. */
5456 ecs->wait_some_more = 0;
5457 }
5458
5459 /* This function handles various cases where we need to continue
5460 waiting for the inferior. */
5461 /* (Used to be the keep_going: label in the old wait_for_inferior). */
5462
5463 static void
5464 keep_going (struct execution_control_state *ecs)
5465 {
5466 /* Make sure normal_stop is called if we get a QUIT handled before
5467 reaching resume. */
5468 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5469
5470 /* Save the pc before execution, to compare with pc after stop. */
5471 ecs->event_thread->prev_pc
5472 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5473
5474 /* If we did not do break;, it means we should keep running the
5475 inferior and not return to the debugger. */
5476
5477 if (ecs->event_thread->control.trap_expected
5478 && ecs->event_thread->suspend.stop_signal != TARGET_SIGNAL_TRAP)
5479 {
5480 /* We took a signal (which we are supposed to pass through to
5481 the inferior, else we'd not get here) and we haven't yet
5482 gotten our trap. Simply continue. */
5483
5484 discard_cleanups (old_cleanups);
5485 resume (currently_stepping (ecs->event_thread),
5486 ecs->event_thread->suspend.stop_signal);
5487 }
5488 else
5489 {
5490 /* Either the trap was not expected, but we are continuing
5491 anyway (the user asked that this signal be passed to the
5492 child)
5493 -- or --
5494 The signal was SIGTRAP, e.g. it was our signal, but we
5495 decided we should resume from it.
5496
5497 We're going to run this baby now!
5498
5499 Note that insert_breakpoints won't try to re-insert
5500 already inserted breakpoints. Therefore, we don't
5501 care if breakpoints were already inserted, or not. */
5502
5503 if (ecs->event_thread->stepping_over_breakpoint)
5504 {
5505 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5506
5507 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5508 /* Since we can't do a displaced step, we have to remove
5509 the breakpoint while we step it. To keep things
5510 simple, we remove them all. */
5511 remove_breakpoints ();
5512 }
5513 else
5514 {
5515 struct gdb_exception e;
5516
5517 /* Stop stepping when inserting breakpoints
5518 has failed. */
5519 TRY_CATCH (e, RETURN_MASK_ERROR)
5520 {
5521 insert_breakpoints ();
5522 }
5523 if (e.reason < 0)
5524 {
5525 exception_print (gdb_stderr, e);
5526 stop_stepping (ecs);
5527 return;
5528 }
5529 }
5530
5531 ecs->event_thread->control.trap_expected
5532 = ecs->event_thread->stepping_over_breakpoint;
5533
5534 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5535 specifies that such a signal should be delivered to the
5536 target program).
5537
5538 Typically, this would occur when a user is debugging a
5539 target monitor on a simulator: the target monitor sets a
5540 breakpoint; the simulator encounters this break-point and
5541 halts the simulation, handing control to GDB; GDB, noting
5542 that the break-point isn't valid, returns control back to the
5543 simulator; the simulator then delivers the hardware
5544 equivalent of a SIGNAL_TRAP to the program being debugged. */
5545
5546 if (ecs->event_thread->suspend.stop_signal == TARGET_SIGNAL_TRAP
5547 && !signal_program[ecs->event_thread->suspend.stop_signal])
5548 ecs->event_thread->suspend.stop_signal = TARGET_SIGNAL_0;
5549
5550 discard_cleanups (old_cleanups);
5551 resume (currently_stepping (ecs->event_thread),
5552 ecs->event_thread->suspend.stop_signal);
5553 }
5554
5555 prepare_to_wait (ecs);
5556 }
5557
5558 /* This function normally comes after a resume, before
5559 handle_inferior_event exits. It takes care of any last bits of
5560 housekeeping, and sets the all-important wait_some_more flag. */
5561
5562 static void
5563 prepare_to_wait (struct execution_control_state *ecs)
5564 {
5565 if (debug_infrun)
5566 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5567
5568 /* This is the old end of the while loop. Let everybody know we
5569 want to wait for the inferior some more and get called again
5570 soon. */
5571 ecs->wait_some_more = 1;
5572 }
5573
5574 /* Several print_*_reason functions to print why the inferior has stopped.
5575 We always print something when the inferior exits, or receives a signal.
5576 The rest of the cases are dealt with later on in normal_stop and
5577 print_it_typical. Ideally there should be a call to one of these
5578 print_*_reason functions from handle_inferior_event each time
5579 stop_stepping is called. */
5580
5581 /* Print why the inferior has stopped.
5582 We are done with a step/next/si/ni command, print why the inferior has
5583 stopped. For now we print nothing to the console; the MI "reason" field
5584 is emitted only when not in the middle of a "step n" operation for n > 1. */
5585
5586 static void
5587 print_end_stepping_range_reason (void)
5588 {
5589 if ((!inferior_thread ()->step_multi
5590 || !inferior_thread ()->control.stop_step)
5591 && ui_out_is_mi_like_p (uiout))
5592 ui_out_field_string (uiout, "reason",
5593 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5594 }
5595
5596 /* The inferior was terminated by a signal, print why it stopped. */
5597
5598 static void
5599 print_signal_exited_reason (enum target_signal siggnal)
5600 {
5601 annotate_signalled ();
5602 if (ui_out_is_mi_like_p (uiout))
5603 ui_out_field_string
5604 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5605 ui_out_text (uiout, "\nProgram terminated with signal ");
5606 annotate_signal_name ();
5607 ui_out_field_string (uiout, "signal-name",
5608 target_signal_to_name (siggnal));
5609 annotate_signal_name_end ();
5610 ui_out_text (uiout, ", ");
5611 annotate_signal_string ();
5612 ui_out_field_string (uiout, "signal-meaning",
5613 target_signal_to_string (siggnal));
5614 annotate_signal_string_end ();
5615 ui_out_text (uiout, ".\n");
5616 ui_out_text (uiout, "The program no longer exists.\n");
5617 }
5618
5619 /* The inferior program is finished, print why it stopped. */
5620
5621 static void
5622 print_exited_reason (int exitstatus)
5623 {
5624 struct inferior *inf = current_inferior ();
5625 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
5626
5627 annotate_exited (exitstatus);
5628 if (exitstatus)
5629 {
5630 if (ui_out_is_mi_like_p (uiout))
5631 ui_out_field_string (uiout, "reason",
5632 async_reason_lookup (EXEC_ASYNC_EXITED));
5633 ui_out_text (uiout, "[Inferior ");
5634 ui_out_text (uiout, plongest (inf->num));
5635 ui_out_text (uiout, " (");
5636 ui_out_text (uiout, pidstr);
5637 ui_out_text (uiout, ") exited with code ");
5638 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5639 ui_out_text (uiout, "]\n");
5640 }
5641 else
5642 {
5643 if (ui_out_is_mi_like_p (uiout))
5644 ui_out_field_string
5645 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5646 ui_out_text (uiout, "[Inferior ");
5647 ui_out_text (uiout, plongest (inf->num));
5648 ui_out_text (uiout, " (");
5649 ui_out_text (uiout, pidstr);
5650 ui_out_text (uiout, ") exited normally]\n");
5651 }
5652 /* Support the --return-child-result option. */
5653 return_child_result_value = exitstatus;
5654 }
5655
5656 /* Signal received; print why the inferior has stopped. The signal table
5657 has told us that this signal should be printed. */
5658
5659 static void
5660 print_signal_received_reason (enum target_signal siggnal)
5661 {
5662 annotate_signal ();
5663
5664 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5665 {
5666 struct thread_info *t = inferior_thread ();
5667
5668 ui_out_text (uiout, "\n[");
5669 ui_out_field_string (uiout, "thread-name",
5670 target_pid_to_str (t->ptid));
5671 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5672 ui_out_text (uiout, " stopped");
5673 }
5674 else
5675 {
5676 ui_out_text (uiout, "\nProgram received signal ");
5677 annotate_signal_name ();
5678 if (ui_out_is_mi_like_p (uiout))
5679 ui_out_field_string
5680 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5681 ui_out_field_string (uiout, "signal-name",
5682 target_signal_to_name (siggnal));
5683 annotate_signal_name_end ();
5684 ui_out_text (uiout, ", ");
5685 annotate_signal_string ();
5686 ui_out_field_string (uiout, "signal-meaning",
5687 target_signal_to_string (siggnal));
5688 annotate_signal_string_end ();
5689 }
5690 ui_out_text (uiout, ".\n");
5691 }
5692
5693 /* Reverse execution: target ran out of history info, print why the inferior
5694 has stopped. */
5695
5696 static void
5697 print_no_history_reason (void)
5698 {
5699 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5700 }
5701
5702 /* Here to return control to GDB when the inferior stops for real.
5703 Print appropriate messages, remove breakpoints, give terminal our modes.
5704
5705 STOP_PRINT_FRAME nonzero means print the executing frame
5706 (pc, function, args, file, line number and line text).
5707 BREAKPOINTS_FAILED nonzero means stop was due to error
5708 attempting to insert breakpoints. */
5709
5710 void
5711 normal_stop (void)
5712 {
5713 struct target_waitstatus last;
5714 ptid_t last_ptid;
5715 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5716
5717 get_last_target_status (&last_ptid, &last);
5718
5719 /* If an exception is thrown from this point on, make sure to
5720 propagate GDB's knowledge of the executing state to the
5721 frontend/user running state. A QUIT is an easy exception to see
5722 here, so do this before any filtered output. */
5723 if (!non_stop)
5724 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5725 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5726 && last.kind != TARGET_WAITKIND_EXITED)
5727 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5728
5729 /* In non-stop mode, we don't want GDB to switch threads behind the
5730 user's back, to avoid races where the user is typing a command to
5731 apply to thread x, but GDB switches to thread y before the user
5732 finishes entering the command. */
5733
5734 /* As with the notification of thread events, we want to delay
5735 notifying the user that we've switched thread context until
5736 the inferior actually stops.
5737
5738 There's no point in saying anything if the inferior has exited.
5739 Note that SIGNALLED here means "exited with a signal", not
5740 "received a signal". */
5741 if (!non_stop
5742 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5743 && target_has_execution
5744 && last.kind != TARGET_WAITKIND_SIGNALLED
5745 && last.kind != TARGET_WAITKIND_EXITED)
5746 {
5747 target_terminal_ours_for_output ();
5748 printf_filtered (_("[Switching to %s]\n"),
5749 target_pid_to_str (inferior_ptid));
5750 annotate_thread_changed ();
5751 previous_inferior_ptid = inferior_ptid;
5752 }
5753
5754 if (!breakpoints_always_inserted_mode () && target_has_execution)
5755 {
5756 if (remove_breakpoints ())
5757 {
5758 target_terminal_ours_for_output ();
5759 printf_filtered (_("Cannot remove breakpoints because "
5760 "program is no longer writable.\nFurther "
5761 "execution is probably impossible.\n"));
5762 }
5763 }
5764
5765 /* If an auto-display called a function and that got a signal,
5766 delete that auto-display to avoid an infinite recursion. */
5767
5768 if (stopped_by_random_signal)
5769 disable_current_display ();
5770
5771 /* Don't print a message if in the middle of doing a "step n"
5772 operation for n > 1. */
5773 if (target_has_execution
5774 && last.kind != TARGET_WAITKIND_SIGNALLED
5775 && last.kind != TARGET_WAITKIND_EXITED
5776 && inferior_thread ()->step_multi
5777 && inferior_thread ()->control.stop_step)
5778 goto done;
5779
5780 target_terminal_ours ();
5781
5782 /* Set the current source location. This will also happen if we
5783 display the frame below, but the current SAL will be incorrect
5784 during a user hook-stop function. */
5785 if (has_stack_frames () && !stop_stack_dummy)
5786 set_current_sal_from_frame (get_current_frame (), 1);
5787
5788 /* Let the user/frontend see the threads as stopped. */
5789 do_cleanups (old_chain);
5790
5791 /* Look up the hook_stop and run it (CLI internally handles problem
5792 of stop_command's pre-hook not existing). */
5793 if (stop_command)
5794 catch_errors (hook_stop_stub, stop_command,
5795 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5796
5797 if (!has_stack_frames ())
5798 goto done;
5799
5800 if (last.kind == TARGET_WAITKIND_SIGNALLED
5801 || last.kind == TARGET_WAITKIND_EXITED)
5802 goto done;
5803
5804 /* Select innermost stack frame - i.e., current frame is frame 0,
5805 and current location is based on that.
5806 Don't do this on return from a stack dummy routine,
5807 or if the program has exited. */
5808
5809 if (!stop_stack_dummy)
5810 {
5811 select_frame (get_current_frame ());
5812
5813 /* Print current location without a level number, if
5814 we have changed functions or hit a breakpoint.
5815 Print source line if we have one.
5816 bpstat_print() contains the logic deciding in detail
5817 what to print, based on the event(s) that just occurred. */
5818
5819 /* If --batch-silent is enabled then there's no need to print the current
5820 source location, and to try risks causing an error message about
5821 missing source files. */
5822 if (stop_print_frame && !batch_silent)
5823 {
5824 int bpstat_ret;
5825 int source_flag;
5826 int do_frame_printing = 1;
5827 struct thread_info *tp = inferior_thread ();
5828
5829 bpstat_ret = bpstat_print (tp->control.stop_bpstat);
5830 switch (bpstat_ret)
5831 {
5832 case PRINT_UNKNOWN:
5833 /* If we had hit a shared library event breakpoint,
5834 bpstat_print would print out this message. If we hit
5835 an OS-level shared library event, do the same
5836 thing. */
5837 if (last.kind == TARGET_WAITKIND_LOADED)
5838 {
5839 printf_filtered (_("Stopped due to shared library event\n"));
5840 source_flag = SRC_LINE; /* something bogus */
5841 do_frame_printing = 0;
5842 break;
5843 }
5844
5845 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5846 (or should) carry around the function and does (or
5847 should) use that when doing a frame comparison. */
5848 if (tp->control.stop_step
5849 && frame_id_eq (tp->control.step_frame_id,
5850 get_frame_id (get_current_frame ()))
5851 && step_start_function == find_pc_function (stop_pc))
5852 source_flag = SRC_LINE; /* Finished step, just
5853 print source line. */
5854 else
5855 source_flag = SRC_AND_LOC; /* Print location and
5856 source line. */
5857 break;
5858 case PRINT_SRC_AND_LOC:
5859 source_flag = SRC_AND_LOC; /* Print location and
5860 source line. */
5861 break;
5862 case PRINT_SRC_ONLY:
5863 source_flag = SRC_LINE;
5864 break;
5865 case PRINT_NOTHING:
5866 source_flag = SRC_LINE; /* something bogus */
5867 do_frame_printing = 0;
5868 break;
5869 default:
5870 internal_error (__FILE__, __LINE__, _("Unknown value."));
5871 }
5872
5873 /* The behavior of this routine with respect to the source
5874 flag is:
5875 SRC_LINE: Print only source line
5876 LOCATION: Print only location
5877 SRC_AND_LOC: Print location and source line. */
5878 if (do_frame_printing)
5879 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5880
5881 /* Display the auto-display expressions. */
5882 do_displays ();
5883 }
5884 }
5885
5886 /* Save the function value return registers, if we care.
5887 We might be about to restore their previous contents. */
5888 if (inferior_thread ()->control.proceed_to_finish
5889 && execution_direction != EXEC_REVERSE)
5890 {
5891 /* This should not be necessary. */
5892 if (stop_registers)
5893 regcache_xfree (stop_registers);
5894
5895 /* NB: The copy goes through to the target picking up the value of
5896 all the registers. */
5897 stop_registers = regcache_dup (get_current_regcache ());
5898 }
5899
5900 if (stop_stack_dummy == STOP_STACK_DUMMY)
5901 {
5902 /* Pop the empty frame that contains the stack dummy.
5903 This also restores inferior state prior to the call
5904 (struct infcall_suspend_state). */
5905 struct frame_info *frame = get_current_frame ();
5906
5907 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5908 frame_pop (frame);
5909 /* frame_pop() calls reinit_frame_cache as the last thing it
5910 does which means there's currently no selected frame. We
5911 don't need to re-establish a selected frame if the dummy call
5912 returns normally, that will be done by
5913 restore_infcall_control_state. However, we do have to handle
5914 the case where the dummy call is returning after being
5915 stopped (e.g. the dummy call previously hit a breakpoint).
5916 We can't know which case we have so just always re-establish
5917 a selected frame here. */
5918 select_frame (get_current_frame ());
5919 }
5920
5921 done:
5922 annotate_stopped ();
5923
5924 /* Suppress the stop observer if we're in the middle of:
5925
5926 - a step n (n > 1), as there are still more steps to be done.
5927
5928 - a "finish" command, as the observer will be called in
5929 finish_command_continuation, so it can include the inferior
5930 function's return value.
5931
5932 - calling an inferior function, as we pretend the inferior didn't
5933 run at all. The return value of the call is handled by the
5934 expression evaluator, through call_function_by_hand. */
5935
5936 if (!target_has_execution
5937 || last.kind == TARGET_WAITKIND_SIGNALLED
5938 || last.kind == TARGET_WAITKIND_EXITED
5939 || (!inferior_thread ()->step_multi
5940 && !(inferior_thread ()->control.stop_bpstat
5941 && inferior_thread ()->control.proceed_to_finish)
5942 && !inferior_thread ()->control.in_infcall))
5943 {
5944 if (!ptid_equal (inferior_ptid, null_ptid))
5945 observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
5946 stop_print_frame);
5947 else
5948 observer_notify_normal_stop (NULL, stop_print_frame);
5949 }
5950
5951 if (target_has_execution)
5952 {
5953 if (last.kind != TARGET_WAITKIND_SIGNALLED
5954 && last.kind != TARGET_WAITKIND_EXITED)
5955 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5956 Delete any breakpoint that is to be deleted at the next stop. */
5957 breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
5958 }
5959
5960 /* Try to get rid of automatically added inferiors that are no
5961 longer needed. Keeping those around slows down things linearly.
5962 Note that this never removes the current inferior. */
5963 prune_inferiors ();
5964 }
5965
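/* Run the pre-hook of the "stop" pseudo-command (the user's hook-stop);
called via catch_errors from normal_stop above. */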
5966 static int
5967 hook_stop_stub (void *cmd)
5968 {
5969 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5970 return (0);
5971 }
5972 \f
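/* Accessors for the signal handling tables: whether GDB should stop on,
print, or pass to the program the signal numbered SIGNO. */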
5973 int
5974 signal_stop_state (int signo)
5975 {
5976 return signal_stop[signo];
5977 }
5978
5979 int
5980 signal_print_state (int signo)
5981 {
5982 return signal_print[signo];
5983 }
5984
5985 int
5986 signal_pass_state (int signo)
5987 {
5988 return signal_program[signo];
5989 }
5990
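/* Recompute the cached signal_pass entry for SIGNO, or for all signals
if SIGNO is -1. A signal can be passed straight to the target without
reporting only when GDB neither stops on it nor prints it, and the
program is supposed to receive it. */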
5991 static void
5992 signal_cache_update (int signo)
5993 {
5994 if (signo == -1)
5995 {
5996 for (signo = 0; signo < (int) TARGET_SIGNAL_LAST; signo++)
5997 signal_cache_update (signo);
5998
5999 return;
6000 }
6001
6002 signal_pass[signo] = (signal_stop[signo] == 0
6003 && signal_print[signo] == 0
6004 && signal_program[signo] == 1);
6005 }
6006
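/* The signal_*_update functions below install a new value in the
corresponding table, refresh the signal_pass cache, and return the
previous setting. */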
6007 int
6008 signal_stop_update (int signo, int state)
6009 {
6010 int ret = signal_stop[signo];
6011
6012 signal_stop[signo] = state;
6013 signal_cache_update (signo);
6014 return ret;
6015 }
6016
6017 int
6018 signal_print_update (int signo, int state)
6019 {
6020 int ret = signal_print[signo];
6021
6022 signal_print[signo] = state;
6023 signal_cache_update (signo);
6024 return ret;
6025 }
6026
6027 int
6028 signal_pass_update (int signo, int state)
6029 {
6030 int ret = signal_program[signo];
6031
6032 signal_program[signo] = state;
6033 signal_cache_update (signo);
6034 return ret;
6035 }
6036
6037 static void
6038 sig_print_header (void)
6039 {
6040 printf_filtered (_("Signal Stop\tPrint\tPass "
6041 "to program\tDescription\n"));
6042 }
6043
6044 static void
6045 sig_print_info (enum target_signal oursig)
6046 {
6047 const char *name = target_signal_to_name (oursig);
6048 int name_padding = 13 - strlen (name);
6049
6050 if (name_padding <= 0)
6051 name_padding = 0;
6052
6053 printf_filtered ("%s", name);
6054 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6055 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6056 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6057 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6058 printf_filtered ("%s\n", target_signal_to_string (oursig));
6059 }
6060
6061 /* Specify how various signals in the inferior should be handled. */
6062
6063 static void
6064 handle_command (char *args, int from_tty)
6065 {
6066 char **argv;
6067 int digits, wordlen;
6068 int sigfirst, signum, siglast;
6069 enum target_signal oursig;
6070 int allsigs;
6071 int nsigs;
6072 unsigned char *sigs;
6073 struct cleanup *old_chain;
6074
6075 if (args == NULL)
6076 {
6077 error_no_arg (_("signal to handle"));
6078 }
6079
6080 /* Allocate and zero an array of flags for which signals to handle. */
6081
6082 nsigs = (int) TARGET_SIGNAL_LAST;
6083 sigs = (unsigned char *) alloca (nsigs);
6084 memset (sigs, 0, nsigs);
6085
6086 /* Break the command line up into args. */
6087
6088 argv = gdb_buildargv (args);
6089 old_chain = make_cleanup_freeargv (argv);
6090
6091 /* Walk through the args, looking for signal oursigs, signal names, and
6092 actions. Signal numbers and signal names may be interspersed with
6093 actions, with the actions being performed for all signals cumulatively
6094 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
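/* For example, an invocation such as "handle SIGUSR1 nostop noprint pass"
or "handle 14-17 stop print" (the range form uses GDB's internal signal
numbers) arrives here already split into separate words by
gdb_buildargv above. */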
6095
6096 while (*argv != NULL)
6097 {
6098 wordlen = strlen (*argv);
6099 for (digits = 0; isdigit ((*argv)[digits]); digits++)
6100 {;
6101 }
6102 allsigs = 0;
6103 sigfirst = siglast = -1;
6104
6105 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
6106 {
6107 /* Apply action to all signals except those used by the
6108 debugger. Silently skip those. */
6109 allsigs = 1;
6110 sigfirst = 0;
6111 siglast = nsigs - 1;
6112 }
6113 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
6114 {
6115 SET_SIGS (nsigs, sigs, signal_stop);
6116 SET_SIGS (nsigs, sigs, signal_print);
6117 }
6118 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
6119 {
6120 UNSET_SIGS (nsigs, sigs, signal_program);
6121 }
6122 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
6123 {
6124 SET_SIGS (nsigs, sigs, signal_print);
6125 }
6126 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
6127 {
6128 SET_SIGS (nsigs, sigs, signal_program);
6129 }
6130 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
6131 {
6132 UNSET_SIGS (nsigs, sigs, signal_stop);
6133 }
6134 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
6135 {
6136 SET_SIGS (nsigs, sigs, signal_program);
6137 }
6138 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
6139 {
6140 UNSET_SIGS (nsigs, sigs, signal_print);
6141 UNSET_SIGS (nsigs, sigs, signal_stop);
6142 }
6143 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
6144 {
6145 UNSET_SIGS (nsigs, sigs, signal_program);
6146 }
6147 else if (digits > 0)
6148 {
6149 /* It is numeric. The numeric signal refers to our own
6150 internal signal numbering from target.h, not to host/target
6151 signal number. This is a feature; users really should be
6152 using symbolic names anyway, and the common ones like
6153 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
6154
6155 sigfirst = siglast = (int)
6156 target_signal_from_command (atoi (*argv));
6157 if ((*argv)[digits] == '-')
6158 {
6159 siglast = (int)
6160 target_signal_from_command (atoi ((*argv) + digits + 1));
6161 }
6162 if (sigfirst > siglast)
6163 {
6164 /* Bet he didn't figure we'd think of this case... */
6165 signum = sigfirst;
6166 sigfirst = siglast;
6167 siglast = signum;
6168 }
6169 }
6170 else
6171 {
6172 oursig = target_signal_from_name (*argv);
6173 if (oursig != TARGET_SIGNAL_UNKNOWN)
6174 {
6175 sigfirst = siglast = (int) oursig;
6176 }
6177 else
6178 {
6179 /* Not a number and not a recognized flag word => complain. */
6180 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
6181 }
6182 }
6183
6184 /* If any signal numbers or symbol names were found, set flags for
6185 which signals to apply actions to. */
6186
6187 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
6188 {
6189 switch ((enum target_signal) signum)
6190 {
6191 case TARGET_SIGNAL_TRAP:
6192 case TARGET_SIGNAL_INT:
6193 if (!allsigs && !sigs[signum])
6194 {
6195 if (query (_("%s is used by the debugger.\n\
6196 Are you sure you want to change it? "),
6197 target_signal_to_name ((enum target_signal) signum)))
6198 {
6199 sigs[signum] = 1;
6200 }
6201 else
6202 {
6203 printf_unfiltered (_("Not confirmed, unchanged.\n"));
6204 gdb_flush (gdb_stdout);
6205 }
6206 }
6207 break;
6208 case TARGET_SIGNAL_0:
6209 case TARGET_SIGNAL_DEFAULT:
6210 case TARGET_SIGNAL_UNKNOWN:
6211 /* Make sure that "all" doesn't print these. */
6212 break;
6213 default:
6214 sigs[signum] = 1;
6215 break;
6216 }
6217 }
6218
6219 argv++;
6220 }
6221
6222 for (signum = 0; signum < nsigs; signum++)
6223 if (sigs[signum])
6224 {
6225 signal_cache_update (-1);
6226 target_pass_signals ((int) TARGET_SIGNAL_LAST, signal_pass);
6227
6228 if (from_tty)
6229 {
6230 /* Show the results. */
6231 sig_print_header ();
6232 for (; signum < nsigs; signum++)
6233 if (sigs[signum])
6234 sig_print_info (signum);
6235 }
6236
6237 break;
6238 }
6239
6240 do_cleanups (old_chain);
6241 }
6242
6243 static void
6244 xdb_handle_command (char *args, int from_tty)
6245 {
6246 char **argv;
6247 struct cleanup *old_chain;
6248
6249 if (args == NULL)
6250 error_no_arg (_("xdb command"));
6251
6252 /* Break the command line up into args. */
6253
6254 argv = gdb_buildargv (args);
6255 old_chain = make_cleanup_freeargv (argv);
6256 if (argv[1] != (char *) NULL)
6257 {
6258 char *argBuf;
6259 int bufLen;
6260
6261 bufLen = strlen (argv[0]) + 20;
6262 argBuf = (char *) xmalloc (bufLen);
6263 if (argBuf)
6264 {
6265 int validFlag = 1;
6266 enum target_signal oursig;
6267
6268 oursig = target_signal_from_name (argv[0]);
6269 memset (argBuf, 0, bufLen);
6270 if (strcmp (argv[1], "Q") == 0)
6271 sprintf (argBuf, "%s %s", argv[0], "noprint");
6272 else
6273 {
6274 if (strcmp (argv[1], "s") == 0)
6275 {
6276 if (!signal_stop[oursig])
6277 sprintf (argBuf, "%s %s", argv[0], "stop");
6278 else
6279 sprintf (argBuf, "%s %s", argv[0], "nostop");
6280 }
6281 else if (strcmp (argv[1], "i") == 0)
6282 {
6283 if (!signal_program[oursig])
6284 sprintf (argBuf, "%s %s", argv[0], "pass");
6285 else
6286 sprintf (argBuf, "%s %s", argv[0], "nopass");
6287 }
6288 else if (strcmp (argv[1], "r") == 0)
6289 {
6290 if (!signal_print[oursig])
6291 sprintf (argBuf, "%s %s", argv[0], "print");
6292 else
6293 sprintf (argBuf, "%s %s", argv[0], "noprint");
6294 }
6295 else
6296 validFlag = 0;
6297 }
6298 if (validFlag)
6299 handle_command (argBuf, from_tty);
6300 else
6301 printf_filtered (_("Invalid signal handling flag.\n"));
6302 if (argBuf)
6303 xfree (argBuf);
6304 }
6305 }
6306 do_cleanups (old_chain);
6307 }
6308
6309 /* Print current contents of the tables set by the handle command.
6310 It is possible we should just be printing signals actually used
6311 by the current target (but for things to work right when switching
6312 targets, all signals should be in the signal tables). */
6313
6314 static void
6315 signals_info (char *signum_exp, int from_tty)
6316 {
6317 enum target_signal oursig;
6318
6319 sig_print_header ();
6320
6321 if (signum_exp)
6322 {
6323 /* First see if this is a symbol name. */
6324 oursig = target_signal_from_name (signum_exp);
6325 if (oursig == TARGET_SIGNAL_UNKNOWN)
6326 {
6327 /* No, try numeric. */
6328 oursig =
6329 target_signal_from_command (parse_and_eval_long (signum_exp));
6330 }
6331 sig_print_info (oursig);
6332 return;
6333 }
6334
6335 printf_filtered ("\n");
6336 /* These ugly casts brought to you by the native VAX compiler. */
6337 for (oursig = TARGET_SIGNAL_FIRST;
6338 (int) oursig < (int) TARGET_SIGNAL_LAST;
6339 oursig = (enum target_signal) ((int) oursig + 1))
6340 {
6341 QUIT;
6342
6343 if (oursig != TARGET_SIGNAL_UNKNOWN
6344 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
6345 sig_print_info (oursig);
6346 }
6347
6348 printf_filtered (_("\nUse the \"handle\" command "
6349 "to change these tables.\n"));
6350 }
6351
6352 /* The $_siginfo convenience variable is a bit special. We don't know
6353 for sure the type of the value until we actually have a chance to
6354 fetch the data. The type can change depending on gdbarch, so it is
6355 also dependent on which thread you have selected. We deal with this by:
6356
6357 1. making $_siginfo be an internalvar that creates a new value on
6358 access.
6359
6360 2. making the value of $_siginfo be an lval_computed value. */
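/* This is what makes expressions such as "print $_siginfo.si_signo" work
from the CLI: each access builds a fresh lval_computed value whose
read/write methods (below) transfer the data via
TARGET_OBJECT_SIGNAL_INFO. */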
6361
6362 /* This function implements the lval_computed support for reading a
6363 $_siginfo value. */
6364
6365 static void
6366 siginfo_value_read (struct value *v)
6367 {
6368 LONGEST transferred;
6369
6370 transferred =
6371 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
6372 NULL,
6373 value_contents_all_raw (v),
6374 value_offset (v),
6375 TYPE_LENGTH (value_type (v)));
6376
6377 if (transferred != TYPE_LENGTH (value_type (v)))
6378 error (_("Unable to read siginfo"));
6379 }
6380
6381 /* This function implements the lval_computed support for writing a
6382 $_siginfo value. */
6383
6384 static void
6385 siginfo_value_write (struct value *v, struct value *fromval)
6386 {
6387 LONGEST transferred;
6388
6389 transferred = target_write (&current_target,
6390 TARGET_OBJECT_SIGNAL_INFO,
6391 NULL,
6392 value_contents_all_raw (fromval),
6393 value_offset (v),
6394 TYPE_LENGTH (value_type (fromval)));
6395
6396 if (transferred != TYPE_LENGTH (value_type (fromval)))
6397 error (_("Unable to write siginfo"));
6398 }
6399
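/* The lval_funcs methods backing a $_siginfo value. */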
6400 static struct lval_funcs siginfo_value_funcs =
6401 {
6402 siginfo_value_read,
6403 siginfo_value_write
6404 };
6405
6406 /* Return a new value with the correct type for the siginfo object of
6407 the current thread using architecture GDBARCH. Return a void value
6408 if there's no object available. */
6409
6410 static struct value *
6411 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6412 {
6413 if (target_has_stack
6414 && !ptid_equal (inferior_ptid, null_ptid)
6415 && gdbarch_get_siginfo_type_p (gdbarch))
6416 {
6417 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6418
6419 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6420 }
6421
6422 return allocate_value (builtin_type (gdbarch)->builtin_void);
6423 }
6424
6425 \f
6426 /* infcall_suspend_state contains state about the program itself like its
6427 registers and any signal it received when it last stopped.
6428 This state must be restored regardless of how the inferior function call
6429 ends (either successfully, or after it hits a breakpoint or signal)
6430 if the program is to properly continue where it left off. */
6431
6432 struct infcall_suspend_state
6433 {
6434 struct thread_suspend_state thread_suspend;
6435 struct inferior_suspend_state inferior_suspend;
6436
6437 /* Other fields: */
6438 CORE_ADDR stop_pc;
6439 struct regcache *registers;
6440
6441 /* Format of SIGINFO_DATA or NULL if it is not present. */
6442 struct gdbarch *siginfo_gdbarch;
6443
6444 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
6445 TYPE_LENGTH (gdbarch_get_siginfo_type ()). For a different gdbarch the
6446 content would be invalid. */
6447 gdb_byte *siginfo_data;
6448 };
6449
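/* Record the inferior's current suspend state (stop signal, siginfo
data if the architecture provides it, stop PC and registers) so that
it can be restored after an inferior function call. */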
6450 struct infcall_suspend_state *
6451 save_infcall_suspend_state (void)
6452 {
6453 struct infcall_suspend_state *inf_state;
6454 struct thread_info *tp = inferior_thread ();
6455 struct inferior *inf = current_inferior ();
6456 struct regcache *regcache = get_current_regcache ();
6457 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6458 gdb_byte *siginfo_data = NULL;
6459
6460 if (gdbarch_get_siginfo_type_p (gdbarch))
6461 {
6462 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6463 size_t len = TYPE_LENGTH (type);
6464 struct cleanup *back_to;
6465
6466 siginfo_data = xmalloc (len);
6467 back_to = make_cleanup (xfree, siginfo_data);
6468
6469 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6470 siginfo_data, 0, len) == len)
6471 discard_cleanups (back_to);
6472 else
6473 {
6474 /* Errors ignored. */
6475 do_cleanups (back_to);
6476 siginfo_data = NULL;
6477 }
6478 }
6479
6480 inf_state = XZALLOC (struct infcall_suspend_state);
6481
6482 if (siginfo_data)
6483 {
6484 inf_state->siginfo_gdbarch = gdbarch;
6485 inf_state->siginfo_data = siginfo_data;
6486 }
6487
6488 inf_state->thread_suspend = tp->suspend;
6489 inf_state->inferior_suspend = inf->suspend;
6490
6491 /* run_inferior_call will not use the signal due to its `proceed' call with
6492 TARGET_SIGNAL_0 anyway. */
6493 tp->suspend.stop_signal = TARGET_SIGNAL_0;
6494
6495 inf_state->stop_pc = stop_pc;
6496
6497 inf_state->registers = regcache_dup (regcache);
6498
6499 return inf_state;
6500 }
6501
6502 /* Restore inferior session state to INF_STATE. */
6503
6504 void
6505 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6506 {
6507 struct thread_info *tp = inferior_thread ();
6508 struct inferior *inf = current_inferior ();
6509 struct regcache *regcache = get_current_regcache ();
6510 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6511
6512 tp->suspend = inf_state->thread_suspend;
6513 inf->suspend = inf_state->inferior_suspend;
6514
6515 stop_pc = inf_state->stop_pc;
6516
6517 if (inf_state->siginfo_gdbarch == gdbarch)
6518 {
6519 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6520 size_t len = TYPE_LENGTH (type);
6521
6522 /* Errors ignored. */
6523 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6524 inf_state->siginfo_data, 0, len);
6525 }
6526
6527 /* The inferior can be gone if the user types "print exit(0)"
6528 (and perhaps other times). */
6529 if (target_has_execution)
6530 /* NB: The register write goes through to the target. */
6531 regcache_cpy (regcache, inf_state->registers);
6532
6533 discard_infcall_suspend_state (inf_state);
6534 }
6535
6536 static void
6537 do_restore_infcall_suspend_state_cleanup (void *state)
6538 {
6539 restore_infcall_suspend_state (state);
6540 }
6541
6542 struct cleanup *
6543 make_cleanup_restore_infcall_suspend_state
6544 (struct infcall_suspend_state *inf_state)
6545 {
6546 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
6547 }
6548
6549 void
6550 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
6551 {
6552 regcache_xfree (inf_state->registers);
6553 xfree (inf_state->siginfo_data);
6554 xfree (inf_state);
6555 }
6556
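/* Return the regcache recorded in INF_STATE, i.e. the register state
the inferior had when the state was saved. */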
6557 struct regcache *
6558 get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
6559 {
6560 return inf_state->registers;
6561 }
6562
6563 /* infcall_control_state contains state regarding gdb's control of the
6564 inferior itself like stepping control. It also contains session state like
6565 the user's currently selected frame. */
6566
6567 struct infcall_control_state
6568 {
6569 struct thread_control_state thread_control;
6570 struct inferior_control_state inferior_control;
6571
6572 /* Other fields: */
6573 enum stop_stack_kind stop_stack_dummy;
6574 int stopped_by_random_signal;
6575 int stop_after_trap;
6576
6577 /* ID of the selected frame when the inferior function call was made. */
6578 struct frame_id selected_frame_id;
6579 };
6580
6581 /* Save all of the information associated with the inferior<==>gdb
6582 connection. */
6583
6584 struct infcall_control_state *
6585 save_infcall_control_state (void)
6586 {
6587 struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
6588 struct thread_info *tp = inferior_thread ();
6589 struct inferior *inf = current_inferior ();
6590
6591 inf_status->thread_control = tp->control;
6592 inf_status->inferior_control = inf->control;
6593
6594 tp->control.step_resume_breakpoint = NULL;
6595 tp->control.exception_resume_breakpoint = NULL;
6596
6597 /* Save the original bpstat chain in INF_STATUS; replace it in TP with a
6598 copy of the chain. If the caller's caller is walking the chain, they'll
6599 be happier if we hand them back the original chain when
6600 restore_infcall_control_state is called. */
6601 tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);
6602
6603 /* Other fields: */
6604 inf_status->stop_stack_dummy = stop_stack_dummy;
6605 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6606 inf_status->stop_after_trap = stop_after_trap;
6607
6608 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6609
6610 return inf_status;
6611 }
6612
6613 static int
6614 restore_selected_frame (void *args)
6615 {
6616 struct frame_id *fid = (struct frame_id *) args;
6617 struct frame_info *frame;
6618
6619 frame = frame_find_by_id (*fid);
6620
6621 /* If frame_find_by_id fails, the previously selected frame is no
6622 longer available (e.g. its stack frame was clobbered). */
6623 if (frame == NULL)
6624 {
6625 warning (_("Unable to restore previously selected frame."));
6626 return 0;
6627 }
6628
6629 select_frame (frame);
6630
6631 return 1;
6632 }
6633
6634 /* Restore inferior session state to INF_STATUS. */
6635
6636 void
6637 restore_infcall_control_state (struct infcall_control_state *inf_status)
6638 {
6639 struct thread_info *tp = inferior_thread ();
6640 struct inferior *inf = current_inferior ();
6641
6642 if (tp->control.step_resume_breakpoint)
6643 tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;
6644
6645 if (tp->control.exception_resume_breakpoint)
6646 tp->control.exception_resume_breakpoint->disposition
6647 = disp_del_at_next_stop;
6648
6649 /* Handle the bpstat_copy of the chain. */
6650 bpstat_clear (&tp->control.stop_bpstat);
6651
6652 tp->control = inf_status->thread_control;
6653 inf->control = inf_status->inferior_control;
6654
6655 /* Other fields: */
6656 stop_stack_dummy = inf_status->stop_stack_dummy;
6657 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6658 stop_after_trap = inf_status->stop_after_trap;
6659
6660 if (target_has_stack)
6661 {
6662 /* The point of catch_errors is that if the stack is clobbered,
6663 walking the stack might encounter a garbage pointer and
6664 error() trying to dereference it. */
6665 if (catch_errors
6666 (restore_selected_frame, &inf_status->selected_frame_id,
6667 "Unable to restore previously selected frame:\n",
6668 RETURN_MASK_ERROR) == 0)
6669 /* Error in restoring the selected frame. Select the innermost
6670 frame. */
6671 select_frame (get_current_frame ());
6672 }
6673
6674 xfree (inf_status);
6675 }
6676
6677 static void
6678 do_restore_infcall_control_state_cleanup (void *sts)
6679 {
6680 restore_infcall_control_state (sts);
6681 }
6682
6683 struct cleanup *
6684 make_cleanup_restore_infcall_control_state
6685 (struct infcall_control_state *inf_status)
6686 {
6687 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
6688 }
6689
6690 void
6691 discard_infcall_control_state (struct infcall_control_state *inf_status)
6692 {
6693 if (inf_status->thread_control.step_resume_breakpoint)
6694 inf_status->thread_control.step_resume_breakpoint->disposition
6695 = disp_del_at_next_stop;
6696
6697 if (inf_status->thread_control.exception_resume_breakpoint)
6698 inf_status->thread_control.exception_resume_breakpoint->disposition
6699 = disp_del_at_next_stop;
6700
6701 /* See save_infcall_control_state for info on stop_bpstat. */
6702 bpstat_clear (&inf_status->thread_control.stop_bpstat);
6703
6704 xfree (inf_status);
6705 }
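
/* A minimal usage sketch of the control-state API above, assuming a
   caller that wants rollback on error but keeps the post-call state on
   success.  The wrapper function below is illustrative only.  */

static void
example_infcall_control_state_usage (void)
{
  struct infcall_control_state *inf_status = save_infcall_control_state ();
  struct cleanup *old_chain
    = make_cleanup_restore_infcall_control_state (inf_status);

  /* ... run the inferior call; an error here restores stepping
     control, the stop_bpstat chain and the selected frame ...  */

  /* Normal path: keep the state resulting from the call rather than
     rolling back, so simply discard what was saved.  */
  discard_cleanups (old_chain);
  discard_infcall_control_state (inf_status);
}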
6706 \f
6707 int
6708 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6709 {
6710 struct target_waitstatus last;
6711 ptid_t last_ptid;
6712
6713 get_last_target_status (&last_ptid, &last);
6714
6715 if (last.kind != TARGET_WAITKIND_FORKED)
6716 return 0;
6717
6718 if (!ptid_equal (last_ptid, pid))
6719 return 0;
6720
6721 *child_pid = last.value.related_pid;
6722 return 1;
6723 }
6724
6725 int
6726 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6727 {
6728 struct target_waitstatus last;
6729 ptid_t last_ptid;
6730
6731 get_last_target_status (&last_ptid, &last);
6732
6733 if (last.kind != TARGET_WAITKIND_VFORKED)
6734 return 0;
6735
6736 if (!ptid_equal (last_ptid, pid))
6737 return 0;
6738
6739 *child_pid = last.value.related_pid;
6740 return 1;
6741 }
6742
6743 int
6744 inferior_has_execd (ptid_t pid, char **execd_pathname)
6745 {
6746 struct target_waitstatus last;
6747 ptid_t last_ptid;
6748
6749 get_last_target_status (&last_ptid, &last);
6750
6751 if (last.kind != TARGET_WAITKIND_EXECD)
6752 return 0;
6753
6754 if (!ptid_equal (last_ptid, pid))
6755 return 0;
6756
6757 *execd_pathname = xstrdup (last.value.execd_pathname);
6758 return 1;
6759 }
6760
6761 int
6762 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6763 {
6764 struct target_waitstatus last;
6765 ptid_t last_ptid;
6766
6767 get_last_target_status (&last_ptid, &last);
6768
6769 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY
6770 && last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6771 return 0;
6772
6773 if (!ptid_equal (last_ptid, pid))
6774 return 0;
6775
6776 *syscall_number = last.value.syscall_number;
6777 return 1;
6778 }
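
/* A minimal sketch of how the predicates above are meant to be used:
   after a stop, ask whether the last reported event for PTID was a
   fork, exec or syscall and pick up the associated datum.  The caller
   shown here is hypothetical.  */

static void
example_check_last_event (ptid_t ptid)
{
  ptid_t child;
  char *pathname;
  int syscall_number;

  if (inferior_has_forked (ptid, &child))
    printf_filtered (_("forked child %d\n"), ptid_get_pid (child));
  else if (inferior_has_execd (ptid, &pathname))
    {
      printf_filtered (_("execd %s\n"), pathname);
      xfree (pathname);
    }
  else if (inferior_has_called_syscall (ptid, &syscall_number))
    printf_filtered (_("syscall %d\n"), syscall_number);
}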
6779
6780 /* Often-used ptids. */
6781 ptid_t null_ptid;
6782 ptid_t minus_one_ptid;
6783
6784 /* Create a ptid given the necessary PID, LWP, and TID components. */
6785
6786 ptid_t
6787 ptid_build (int pid, long lwp, long tid)
6788 {
6789 ptid_t ptid;
6790
6791 ptid.pid = pid;
6792 ptid.lwp = lwp;
6793 ptid.tid = tid;
6794 return ptid;
6795 }
6796
6797 /* Create a ptid from just a pid. */
6798
6799 ptid_t
6800 pid_to_ptid (int pid)
6801 {
6802 return ptid_build (pid, 0, 0);
6803 }
6804
6805 /* Fetch the pid (process id) component from a ptid. */
6806
6807 int
6808 ptid_get_pid (ptid_t ptid)
6809 {
6810 return ptid.pid;
6811 }
6812
6813 /* Fetch the lwp (lightweight process) component from a ptid. */
6814
6815 long
6816 ptid_get_lwp (ptid_t ptid)
6817 {
6818 return ptid.lwp;
6819 }
6820
6821 /* Fetch the tid (thread id) component from a ptid. */
6822
6823 long
6824 ptid_get_tid (ptid_t ptid)
6825 {
6826 return ptid.tid;
6827 }
6828
6829 /* ptid_equal() is used to test equality of two ptids. */
6830
6831 int
6832 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6833 {
6834 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6835 && ptid1.tid == ptid2.tid);
6836 }
6837
6838 /* Returns true if PTID represents a process. */
6839
6840 int
6841 ptid_is_pid (ptid_t ptid)
6842 {
6843 if (ptid_equal (minus_one_ptid, ptid))
6844 return 0;
6845 if (ptid_equal (null_ptid, ptid))
6846 return 0;
6847
6848 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6849 }
6850
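/* Return true if PTID matches FILTER.  FILTER can be the wild card
   MINUS_ONE_PTID (all ptids match it); can be a ptid representing a
   process (ptid_is_pid returns true), in which case, all lwps and
   threads of that given process match, lwps and threads of other
   processes do not; or, it can represent a specific thread, in which
   case, only that thread will match.  */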
6851 int
6852 ptid_match (ptid_t ptid, ptid_t filter)
6853 {
6854 if (ptid_equal (filter, minus_one_ptid))
6855 return 1;
6856 if (ptid_is_pid (filter)
6857 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6858 return 1;
6859 else if (ptid_equal (ptid, filter))
6860 return 1;
6861
6862 return 0;
6863 }
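
/* A small sketch illustrating ptid_match's filter semantics: a
   process-wide filter (lwp == tid == 0) matches any lwp or thread of
   that process, while minus_one_ptid matches everything.  The numeric
   values are illustrative.  */

static void
example_ptid_match (void)
{
  ptid_t thread = ptid_build (1234, 1235, 0);

  gdb_assert (ptid_match (thread, pid_to_ptid (1234)));
  gdb_assert (ptid_match (thread, minus_one_ptid));
  gdb_assert (!ptid_match (thread, pid_to_ptid (999)));
}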
6864
6865 /* restore_inferior_ptid() will be used by the cleanup machinery
6866 to restore the inferior_ptid value saved in a call to
6867 save_inferior_ptid(). */
6868
6869 static void
6870 restore_inferior_ptid (void *arg)
6871 {
6872 ptid_t *saved_ptid_ptr = arg;
6873
6874 inferior_ptid = *saved_ptid_ptr;
6875 xfree (arg);
6876 }
6877
6878 /* Save the value of inferior_ptid so that it may be restored by a
6879 later call to do_cleanups(). Returns the struct cleanup pointer
6880 needed to perform the cleanup later. */
6881
6882 struct cleanup *
6883 save_inferior_ptid (void)
6884 {
6885 ptid_t *saved_ptid_ptr;
6886
6887 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6888 *saved_ptid_ptr = inferior_ptid;
6889 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6890 }
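
/* A minimal sketch of the save/restore pattern for inferior_ptid:
   switch to another ptid temporarily and rely on the cleanup to put
   the original value back, even if an error is thrown in between.
   The ptid argument is illustrative.  */

static void
example_with_temporary_ptid (ptid_t temp_ptid)
{
  struct cleanup *old_chain = save_inferior_ptid ();

  inferior_ptid = temp_ptid;
  /* ... operate on TEMP_PTID via target routines here ...  */

  do_cleanups (old_chain);  /* Restores the saved inferior_ptid.  */
}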
6891 \f
6892
6893 /* User interface for reverse debugging:
6894 Set exec-direction / show exec-direction commands
6895 (setting the direction errors out unless the target can execute in reverse). */
6896
6897 int execution_direction = EXEC_FORWARD;
6898 static const char exec_forward[] = "forward";
6899 static const char exec_reverse[] = "reverse";
6900 static const char *exec_direction = exec_forward;
6901 static const char *exec_direction_names[] = {
6902 exec_forward,
6903 exec_reverse,
6904 NULL
6905 };
6906
6907 static void
6908 set_exec_direction_func (char *args, int from_tty,
6909 struct cmd_list_element *cmd)
6910 {
6911 if (target_can_execute_reverse)
6912 {
6913 if (!strcmp (exec_direction, exec_forward))
6914 execution_direction = EXEC_FORWARD;
6915 else if (!strcmp (exec_direction, exec_reverse))
6916 execution_direction = EXEC_REVERSE;
6917 }
6918 else
6919 {
6920 exec_direction = exec_forward;
6921 error (_("Target does not support this operation."));
6922 }
6923 }
6924
6925 static void
6926 show_exec_direction_func (struct ui_file *out, int from_tty,
6927 struct cmd_list_element *cmd, const char *value)
6928 {
6929 switch (execution_direction) {
6930 case EXEC_FORWARD:
6931 fprintf_filtered (out, _("Forward.\n"));
6932 break;
6933 case EXEC_REVERSE:
6934 fprintf_filtered (out, _("Reverse.\n"));
6935 break;
6936 default:
6937 internal_error (__FILE__, __LINE__,
6938 _("bogus execution_direction value: %d"),
6939 (int) execution_direction);
6940 }
6941 }
6942
6943 /* User interface for non-stop mode. */
6944
6945 int non_stop = 0;
6946
6947 static void
6948 set_non_stop (char *args, int from_tty,
6949 struct cmd_list_element *c)
6950 {
6951 if (target_has_execution)
6952 {
6953 non_stop_1 = non_stop;
6954 error (_("Cannot change this setting while the inferior is running."));
6955 }
6956
6957 non_stop = non_stop_1;
6958 }
6959
6960 static void
6961 show_non_stop (struct ui_file *file, int from_tty,
6962 struct cmd_list_element *c, const char *value)
6963 {
6964 fprintf_filtered (file,
6965 _("Controlling the inferior in non-stop mode is %s.\n"),
6966 value);
6967 }
6968
6969 static void
6970 show_schedule_multiple (struct ui_file *file, int from_tty,
6971 struct cmd_list_element *c, const char *value)
6972 {
6973 fprintf_filtered (file, _("Resuming the execution of threads "
6974 "of all processes is %s.\n"), value);
6975 }
6976
6977 void
6978 _initialize_infrun (void)
6979 {
6980 int i;
6981 int numsigs;
6982
6983 add_info ("signals", signals_info, _("\
6984 What debugger does when program gets various signals.\n\
6985 Specify a signal as argument to print info on that signal only."));
6986 add_info_alias ("handle", "signals", 0);
6987
6988 add_com ("handle", class_run, handle_command, _("\
6989 Specify how to handle a signal.\n\
6990 Args are signals and actions to apply to those signals.\n\
6991 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6992 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6993 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6994 The special arg \"all\" is recognized to mean all signals except those\n\
6995 used by the debugger, typically SIGTRAP and SIGINT.\n\
6996 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6997 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6998 Stop means reenter debugger if this signal happens (implies print).\n\
6999 Print means print a message if this signal happens.\n\
7000 Pass means let program see this signal; otherwise program doesn't know.\n\
7001 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7002 Pass and Stop may be combined."));
7003 if (xdb_commands)
7004 {
7005 add_com ("lz", class_info, signals_info, _("\
7006 What debugger does when program gets various signals.\n\
7007 Specify a signal as argument to print info on that signal only."));
7008 add_com ("z", class_run, xdb_handle_command, _("\
7009 Specify how to handle a signal.\n\
7010 Args are signals and actions to apply to those signals.\n\
7011 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
7012 from 1-15 are allowed for compatibility with old versions of GDB.\n\
7013 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
7014 The special arg \"all\" is recognized to mean all signals except those\n\
7015 used by the debugger, typically SIGTRAP and SIGINT.\n\
7016 Recognized actions include \"s\" (toggles between stop and nostop),\n\
7017 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
7018 nopass), \"Q\" (noprint)\n\
7019 Stop means reenter debugger if this signal happens (implies print).\n\
7020 Print means print a message if this signal happens.\n\
7021 Pass means let program see this signal; otherwise program doesn't know.\n\
7022 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
7023 Pass and Stop may be combined."));
7024 }
7025
7026 if (!dbx_commands)
7027 stop_command = add_cmd ("stop", class_obscure,
7028 not_just_help_class_command, _("\
7029 There is no `stop' command, but you can set a hook on `stop'.\n\
7030 This allows you to set a list of commands to be run each time execution\n\
7031 of the program stops."), &cmdlist);
7032
7033 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
7034 Set inferior debugging."), _("\
7035 Show inferior debugging."), _("\
7036 When non-zero, inferior specific debugging is enabled."),
7037 NULL,
7038 show_debug_infrun,
7039 &setdebuglist, &showdebuglist);
7040
7041 add_setshow_boolean_cmd ("displaced", class_maintenance,
7042 &debug_displaced, _("\
7043 Set displaced stepping debugging."), _("\
7044 Show displaced stepping debugging."), _("\
7045 When non-zero, displaced stepping specific debugging is enabled."),
7046 NULL,
7047 show_debug_displaced,
7048 &setdebuglist, &showdebuglist);
7049
7050 add_setshow_boolean_cmd ("non-stop", no_class,
7051 &non_stop_1, _("\
7052 Set whether gdb controls the inferior in non-stop mode."), _("\
7053 Show whether gdb controls the inferior in non-stop mode."), _("\
7054 When debugging a multi-threaded program and this setting is\n\
7055 off (the default, also called all-stop mode), when one thread stops\n\
7056 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
7057 all other threads in the program while you interact with the thread of\n\
7058 interest. When you continue or step a thread, you can allow the other\n\
7059 threads to run, or have them remain stopped, but while you inspect any\n\
7060 thread's state, all threads stop.\n\
7061 \n\
7062 In non-stop mode, when one thread stops, other threads can continue\n\
7063 to run freely. You'll be able to step each thread independently,\n\
7064 leave it stopped or free to run as needed."),
7065 set_non_stop,
7066 show_non_stop,
7067 &setlist,
7068 &showlist);
7069
7070 numsigs = (int) TARGET_SIGNAL_LAST;
7071 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
7072 signal_print = (unsigned char *)
7073 xmalloc (sizeof (signal_print[0]) * numsigs);
7074 signal_program = (unsigned char *)
7075 xmalloc (sizeof (signal_program[0]) * numsigs);
7076 signal_pass = (unsigned char *)
7077 xmalloc (sizeof (signal_pass[0]) * numsigs);
7078 for (i = 0; i < numsigs; i++)
7079 {
7080 signal_stop[i] = 1;
7081 signal_print[i] = 1;
7082 signal_program[i] = 1;
7083 }
7084
7085 /* Signals caused by the debugger's own actions
7086 should not be given to the program afterwards. */
7087 signal_program[TARGET_SIGNAL_TRAP] = 0;
7088 signal_program[TARGET_SIGNAL_INT] = 0;
7089
7090 /* Signals that are not errors should not normally enter the debugger. */
7091 signal_stop[TARGET_SIGNAL_ALRM] = 0;
7092 signal_print[TARGET_SIGNAL_ALRM] = 0;
7093 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
7094 signal_print[TARGET_SIGNAL_VTALRM] = 0;
7095 signal_stop[TARGET_SIGNAL_PROF] = 0;
7096 signal_print[TARGET_SIGNAL_PROF] = 0;
7097 signal_stop[TARGET_SIGNAL_CHLD] = 0;
7098 signal_print[TARGET_SIGNAL_CHLD] = 0;
7099 signal_stop[TARGET_SIGNAL_IO] = 0;
7100 signal_print[TARGET_SIGNAL_IO] = 0;
7101 signal_stop[TARGET_SIGNAL_POLL] = 0;
7102 signal_print[TARGET_SIGNAL_POLL] = 0;
7103 signal_stop[TARGET_SIGNAL_URG] = 0;
7104 signal_print[TARGET_SIGNAL_URG] = 0;
7105 signal_stop[TARGET_SIGNAL_WINCH] = 0;
7106 signal_print[TARGET_SIGNAL_WINCH] = 0;
7107 signal_stop[TARGET_SIGNAL_PRIO] = 0;
7108 signal_print[TARGET_SIGNAL_PRIO] = 0;
7109
7110 /* These signals are used internally by user-level thread
7111 implementations. (See signal(5) on Solaris.) Like the above
7112 signals, a healthy program receives and handles them as part of
7113 its normal operation. */
7114 signal_stop[TARGET_SIGNAL_LWP] = 0;
7115 signal_print[TARGET_SIGNAL_LWP] = 0;
7116 signal_stop[TARGET_SIGNAL_WAITING] = 0;
7117 signal_print[TARGET_SIGNAL_WAITING] = 0;
7118 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
7119 signal_print[TARGET_SIGNAL_CANCEL] = 0;
7120
7121 /* Update cached state. */
7122 signal_cache_update (-1);
7123
7124 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
7125 &stop_on_solib_events, _("\
7126 Set stopping for shared library events."), _("\
7127 Show stopping for shared library events."), _("\
7128 If nonzero, gdb will give control to the user when the dynamic linker\n\
7129 notifies gdb of shared library events. The most common event of interest\n\
7130 to the user would be loading/unloading of a new library."),
7131 NULL,
7132 show_stop_on_solib_events,
7133 &setlist, &showlist);
7134
7135 add_setshow_enum_cmd ("follow-fork-mode", class_run,
7136 follow_fork_mode_kind_names,
7137 &follow_fork_mode_string, _("\
7138 Set debugger response to a program call of fork or vfork."), _("\
7139 Show debugger response to a program call of fork or vfork."), _("\
7140 A fork or vfork creates a new process. follow-fork-mode can be:\n\
7141 parent - the original process is debugged after a fork\n\
7142 child - the new process is debugged after a fork\n\
7143 The unfollowed process will continue to run.\n\
7144 By default, the debugger will follow the parent process."),
7145 NULL,
7146 show_follow_fork_mode_string,
7147 &setlist, &showlist);
7148
7149 add_setshow_enum_cmd ("follow-exec-mode", class_run,
7150 follow_exec_mode_names,
7151 &follow_exec_mode_string, _("\
7152 Set debugger response to a program call of exec."), _("\
7153 Show debugger response to a program call of exec."), _("\
7154 An exec call replaces the program image of a process.\n\
7155 \n\
7156 follow-exec-mode can be:\n\
7157 \n\
7158 new - the debugger creates a new inferior and rebinds the process\n\
7159 to this new inferior. The program the process was running before\n\
7160 the exec call can be restarted afterwards by restarting the original\n\
7161 inferior.\n\
7162 \n\
7163 same - the debugger keeps the process bound to the same inferior.\n\
7164 The new executable image replaces the previous executable loaded in\n\
7165 the inferior. Restarting the inferior after the exec call restarts\n\
7166 the executable the process was running after the exec call.\n\
7167 \n\
7168 By default, the debugger will use the same inferior."),
7169 NULL,
7170 show_follow_exec_mode_string,
7171 &setlist, &showlist);
7172
7173 add_setshow_enum_cmd ("scheduler-locking", class_run,
7174 scheduler_enums, &scheduler_mode, _("\
7175 Set mode for locking scheduler during execution."), _("\
7176 Show mode for locking scheduler during execution."), _("\
7177 off == no locking (threads may preempt at any time)\n\
7178 on == full locking (no thread except the current thread may run)\n\
7179 step == scheduler locked during every single-step operation.\n\
7180 In this mode, no other thread may run during a step command.\n\
7181 Other threads may run while stepping over a function call ('next')."),
7182 set_schedlock_func, /* traps on target vector */
7183 show_scheduler_mode,
7184 &setlist, &showlist);
7185
7186 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
7187 Set mode for resuming threads of all processes."), _("\
7188 Show mode for resuming threads of all processes."), _("\
7189 When on, execution commands (such as 'continue' or 'next') resume all\n\
7190 threads of all processes. When off (which is the default), execution\n\
7191 commands only resume the threads of the current process. The set of\n\
7192 threads that are resumed is further refined by the scheduler-locking\n\
7193 mode (see help set scheduler-locking)."),
7194 NULL,
7195 show_schedule_multiple,
7196 &setlist, &showlist);
7197
7198 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
7199 Set mode of the step operation."), _("\
7200 Show mode of the step operation."), _("\
7201 When set, doing a step over a function without debug line information\n\
7202 will stop at the first instruction of that function. Otherwise, the\n\
7203 function is skipped and the step command stops at a different source line."),
7204 NULL,
7205 show_step_stop_if_no_debug,
7206 &setlist, &showlist);
7207
7208 add_setshow_enum_cmd ("displaced-stepping", class_run,
7209 can_use_displaced_stepping_enum,
7210 &can_use_displaced_stepping, _("\
7211 Set debugger's willingness to use displaced stepping."), _("\
7212 Show debugger's willingness to use displaced stepping."), _("\
7213 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
7214 supported by the target architecture. If off, gdb will not use displaced\n\
7215 stepping to step over breakpoints, even if such is supported by the target\n\
7216 architecture. If auto (which is the default), gdb will use displaced stepping\n\
7217 if the target architecture supports it and non-stop mode is active, but will not\n\
7218 use it in all-stop mode (see help set non-stop)."),
7219 NULL,
7220 show_can_use_displaced_stepping,
7221 &setlist, &showlist);
7222
7223 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
7224 &exec_direction, _("Set direction of execution.\n\
7225 Options are 'forward' or 'reverse'."),
7226 _("Show direction of execution (forward/reverse)."),
7227 _("Tells gdb whether to execute forward or backward."),
7228 set_exec_direction_func, show_exec_direction_func,
7229 &setlist, &showlist);
7230
7231 /* Set/show detach-on-fork: user-settable mode. */
7232
7233 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
7234 Set whether gdb will detach the child of a fork."), _("\
7235 Show whether gdb will detach the child of a fork."), _("\
7236 Tells gdb whether to detach the child of a fork."),
7237 NULL, NULL, &setlist, &showlist);
7238
7239 /* ptid initializations */
7240 null_ptid = ptid_build (0, 0, 0);
7241 minus_one_ptid = ptid_build (-1, 0, 0);
7242 inferior_ptid = null_ptid;
7243 target_last_wait_ptid = minus_one_ptid;
7244
7245 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
7246 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
7247 observer_attach_thread_exit (infrun_thread_thread_exit);
7248 observer_attach_inferior_exit (infrun_inferior_exit);
7249
7250 /* Explicitly create without lookup, since a lookup would try to
7251 create a value with a void type, and when we get here, gdbarch
7252 isn't initialized yet. At this point, we're quite sure there
7253 isn't another convenience variable of the same name. */
7254 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
7255
7256 add_setshow_boolean_cmd ("observer", no_class,
7257 &observer_mode_1, _("\
7258 Set whether gdb controls the inferior in observer mode."), _("\
7259 Show whether gdb controls the inferior in observer mode."), _("\
7260 In observer mode, GDB can get data from the inferior, but not\n\
7261 affect its execution. Registers and memory may not be changed,\n\
7262 breakpoints may not be set, and the program cannot be interrupted\n\
7263 or signalled."),
7264 set_observer_mode,
7265 show_observer_mode,
7266 &setlist,
7267 &showlist);
7268 }