1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55
56 /* Prototypes for local functions */
57
58 static void signals_info (char *, int);
59
60 static void handle_command (char *, int);
61
62 static void sig_print_info (enum target_signal);
63
64 static void sig_print_header (void);
65
66 static void resume_cleanups (void *);
67
68 static int hook_stop_stub (void *);
69
70 static int restore_selected_frame (void *);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 static void print_exited_reason (int exitstatus);
87
88 static void print_signal_exited_reason (enum target_signal siggnal);
89
90 static void print_no_history_reason (void);
91
92 static void print_signal_received_reason (enum target_signal siggnal);
93
94 static void print_end_stepping_range_reason (void);
95
96 void _initialize_infrun (void);
97
98 void nullify_last_target_wait_ptid (void);
99
100 /* When set, stop the 'step' command if we enter a function which has
101 no line number information. The normal behavior is that we step
102 over such functions. */
103 int step_stop_if_no_debug = 0;
104 static void
105 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
106 struct cmd_list_element *c, const char *value)
107 {
108 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
109 }
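/* Illustrative note (not from the original source): step_stop_if_no_debug
   backs the user-visible "step-mode" setting, so

       (gdb) set step-mode on
       (gdb) step

   makes "step" stop on entering a function with no line number
   information instead of stepping over it.  The command registration is
   presumably done in _initialize_infrun (declared above), which is not
   part of this excerpt.  */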
110
111 /* In asynchronous mode, but simulating synchronous execution. */
112
113 int sync_execution = 0;
114
115 /* wait_for_inferior and normal_stop use this to notify the user
116 when the inferior stopped in a different thread than it had been
117 running in. */
118
119 static ptid_t previous_inferior_ptid;
120
121 /* Default behavior is to detach newly forked processes (legacy). */
122 int detach_fork = 1;
123
124 int debug_displaced = 0;
125 static void
126 show_debug_displaced (struct ui_file *file, int from_tty,
127 struct cmd_list_element *c, const char *value)
128 {
129 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
130 }
131
132 int debug_infrun = 0;
133 static void
134 show_debug_infrun (struct ui_file *file, int from_tty,
135 struct cmd_list_element *c, const char *value)
136 {
137 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
138 }
139
140 /* If the program uses ELF-style shared libraries, then calls to
141 functions in shared libraries go through stubs, which live in a
142 table called the PLT (Procedure Linkage Table). The first time the
143 function is called, the stub sends control to the dynamic linker,
144 which looks up the function's real address, patches the stub so
145 that future calls will go directly to the function, and then passes
146 control to the function.
147
148 If we are stepping at the source level, we don't want to see any of
149 this --- we just want to skip over the stub and the dynamic linker.
150 The simple approach is to single-step until control leaves the
151 dynamic linker.
152
153 However, on some systems (e.g., Red Hat's 5.2 distribution) the
154 dynamic linker calls functions in the shared C library, so you
155 can't tell from the PC alone whether the dynamic linker is still
156 running. In this case, we use a step-resume breakpoint to get us
157 past the dynamic linker, as if we were using "next" to step over a
158 function call.
159
160 in_solib_dynsym_resolve_code() says whether we're in the dynamic
161 linker code or not. Normally, this means we single-step. However,
162 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
163 address where we can place a step-resume breakpoint to get past the
164 linker's symbol resolution function.
165
166 in_solib_dynsym_resolve_code() can generally be implemented in a
167 pretty portable way, by comparing the PC against the address ranges
168 of the dynamic linker's sections.
169
170 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
171 it depends on internal details of the dynamic linker. It's usually
172 not too hard to figure out where to put a breakpoint, but it
173 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
174 sanity checking. If it can't figure things out, returning zero and
175 getting the (possibly confusing) stepping behavior is better than
176 signalling an error, which will obscure the change in the
177 inferior's state. */
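/* A hypothetical illustration of the flow described above (the symbol
   names below are illustrative, not taken from this file):

       caller
         --> foo@plt             (PLT stub)
         --> _dl_runtime_resolve (dynamic linker, first call only)
         --> foo                 (the real function)

   When stepping at the source level we want to end up in foo, skipping
   both the stub and the resolver; that is what the single-step and
   step-resume-breakpoint strategies described above achieve.  */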
178
179 /* This function returns TRUE if pc is the address of an instruction
180 that lies within the dynamic linker (such as the event hook, or the
181 dld itself).
182
183 This function must be used only when a dynamic linker event has
184 been caught, and the inferior is being stepped out of the hook, or
185 undefined results are guaranteed. */
186
187 #ifndef SOLIB_IN_DYNAMIC_LINKER
188 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
189 #endif
190
191 /* "Observer mode" is somewhat like a more extreme version of
192 non-stop, in which all GDB operations that might affect the
193 target's execution have been disabled. */
194
195 static int non_stop_1 = 0;
196
197 int observer_mode = 0;
198 static int observer_mode_1 = 0;
199
200 static void
201 set_observer_mode (char *args, int from_tty,
202 struct cmd_list_element *c)
203 {
204 extern int pagination_enabled;
205
206 if (target_has_execution)
207 {
208 observer_mode_1 = observer_mode;
209 error (_("Cannot change this setting while the inferior is running."));
210 }
211
212 observer_mode = observer_mode_1;
213
214 may_write_registers = !observer_mode;
215 may_write_memory = !observer_mode;
216 may_insert_breakpoints = !observer_mode;
217 may_insert_tracepoints = !observer_mode;
218 /* We can insert fast tracepoints in or out of observer mode,
219 but enable them if we're going into this mode. */
220 if (observer_mode)
221 may_insert_fast_tracepoints = 1;
222 may_stop = !observer_mode;
223 update_target_permissions ();
224
225 /* When going *into* observer mode we must force non-stop; when
226 going out we leave it that way. */
227 if (observer_mode)
228 {
229 target_async_permitted = 1;
230 pagination_enabled = 0;
231 non_stop = non_stop_1 = 1;
232 }
233
234 if (from_tty)
235 printf_filtered (_("Observer mode is now %s.\n"),
236 (observer_mode ? "on" : "off"));
237 }
238
239 static void
240 show_observer_mode (struct ui_file *file, int from_tty,
241 struct cmd_list_element *c, const char *value)
242 {
243 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
244 }
245
246 /* This updates the value of observer mode based on changes in
247 permissions. Note that we are deliberately ignoring the values of
248 may-write-registers and may-write-memory, since the user may have
249 reason to enable these during a session, for instance to turn on a
250 debugging-related global. */
251
252 void
253 update_observer_mode (void)
254 {
255 int newval;
256
257 newval = (!may_insert_breakpoints
258 && !may_insert_tracepoints
259 && may_insert_fast_tracepoints
260 && !may_stop
261 && non_stop);
262
263 /* Let the user know if things change. */
264 if (newval != observer_mode)
265 printf_filtered (_("Observer mode is now %s.\n"),
266 (newval ? "on" : "off"));
267
268 observer_mode = observer_mode_1 = newval;
269 }
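/* Worked example of the computation above (the "set may-..." command
   names are the usual settings behind these globals and are assumed
   here; their registration is not shown in this excerpt): with
   may-insert-breakpoints, may-insert-tracepoints and may-stop all off,
   may-insert-fast-tracepoints on, and non-stop on, NEWVAL comes out as
   1 and the next call prints "Observer mode is now on.".  */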
270
271 /* Tables of how to react to signals; the user sets them. */
272
273 static unsigned char *signal_stop;
274 static unsigned char *signal_print;
275 static unsigned char *signal_program;
276
277 #define SET_SIGS(nsigs,sigs,flags) \
278 do { \
279 int signum = (nsigs); \
280 while (signum-- > 0) \
281 if ((sigs)[signum]) \
282 (flags)[signum] = 1; \
283 } while (0)
284
285 #define UNSET_SIGS(nsigs,sigs,flags) \
286 do { \
287 int signum = (nsigs); \
288 while (signum-- > 0) \
289 if ((sigs)[signum]) \
290 (flags)[signum] = 0; \
291 } while (0)
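/* A minimal usage sketch of the two macros above (hypothetical caller;
   the real callers live elsewhere in this file):

       unsigned char sigs[TARGET_SIGNAL_LAST];   -- signals named by the user
       ...
       SET_SIGS (nsigs, sigs, signal_stop);      -- stop on those signals
       UNSET_SIGS (nsigs, sigs, signal_print);   -- ...but don't print them

   Each macro walks SIGS backwards and sets or clears the corresponding
   entry in the FLAGS table for every signal marked in SIGS.  */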
292
293 /* Value to pass to target_resume() to cause all threads to resume. */
294
295 #define RESUME_ALL minus_one_ptid
296
297 /* Command list pointer for the "stop" placeholder. */
298
299 static struct cmd_list_element *stop_command;
300
301 /* Function inferior was in as of last step command. */
302
303 static struct symbol *step_start_function;
304
305 /* Nonzero if we want to give control to the user when we're notified
306 of shared library events by the dynamic linker. */
307 int stop_on_solib_events;
308 static void
309 show_stop_on_solib_events (struct ui_file *file, int from_tty,
310 struct cmd_list_element *c, const char *value)
311 {
312 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
313 value);
314 }
315
316 /* Nonzero means we are expecting a trace trap and should stop
317 the inferior and return silently when it happens. */
318
319 int stop_after_trap;
320
321 /* Save register contents here when executing a "finish" command or when
322 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
323 Thus this contains the return value from the called function (assuming
324 values are returned in a register). */
325
326 struct regcache *stop_registers;
327
328 /* Nonzero after stop if current stack frame should be printed. */
329
330 static int stop_print_frame;
331
332 /* This is a cached copy of the pid/waitstatus of the last event
333 returned by target_wait()/deprecated_target_wait_hook(). This
334 information is returned by get_last_target_status(). */
335 static ptid_t target_last_wait_ptid;
336 static struct target_waitstatus target_last_waitstatus;
337
338 static void context_switch (ptid_t ptid);
339
340 void init_thread_stepping_state (struct thread_info *tss);
341
342 void init_infwait_state (void);
343
344 static const char follow_fork_mode_child[] = "child";
345 static const char follow_fork_mode_parent[] = "parent";
346
347 static const char *follow_fork_mode_kind_names[] = {
348 follow_fork_mode_child,
349 follow_fork_mode_parent,
350 NULL
351 };
352
353 static const char *follow_fork_mode_string = follow_fork_mode_parent;
354 static void
355 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
356 struct cmd_list_element *c, const char *value)
357 {
358 fprintf_filtered (file, _("\
359 Debugger response to a program call of fork or vfork is \"%s\".\n"),
360 value);
361 }
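/* Illustrative usage of the setting above (the command name follows the
   enum strings and is assumed here; its registration is not part of
   this excerpt):

       (gdb) set follow-fork-mode child    -- debug the child after a fork
       (gdb) set follow-fork-mode parent   -- stay with the parent (default)

   This works together with detach_fork above, which controls whether
   the process GDB does not follow is detached or kept under control.  */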
362 \f
363
364 /* Tell the target to follow the fork we're stopped at. Returns true
365 if the inferior should be resumed; false, if the target for some
366 reason decided it's best not to resume. */
367
368 static int
369 follow_fork (void)
370 {
371 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
372 int should_resume = 1;
373 struct thread_info *tp;
374
375 /* Copy user stepping state to the new inferior thread. FIXME: the
376 followed fork child thread should have a copy of most of the
377 parent thread structure's run control related fields, not just these.
378 Initialized to avoid "may be used uninitialized" warnings from gcc. */
379 struct breakpoint *step_resume_breakpoint = NULL;
380 CORE_ADDR step_range_start = 0;
381 CORE_ADDR step_range_end = 0;
382 struct frame_id step_frame_id = { 0 };
383
384 if (!non_stop)
385 {
386 ptid_t wait_ptid;
387 struct target_waitstatus wait_status;
388
389 /* Get the last target status returned by target_wait(). */
390 get_last_target_status (&wait_ptid, &wait_status);
391
392 /* If not stopped at a fork event, then there's nothing else to
393 do. */
394 if (wait_status.kind != TARGET_WAITKIND_FORKED
395 && wait_status.kind != TARGET_WAITKIND_VFORKED)
396 return 1;
397
398 /* Check if we switched over from WAIT_PTID, since the event was
399 reported. */
400 if (!ptid_equal (wait_ptid, minus_one_ptid)
401 && !ptid_equal (inferior_ptid, wait_ptid))
402 {
403 /* We did. Switch back to WAIT_PTID thread, to tell the
404 target to follow it (in either direction). We'll
405 afterwards refuse to resume, and inform the user what
406 happened. */
407 switch_to_thread (wait_ptid);
408 should_resume = 0;
409 }
410 }
411
412 tp = inferior_thread ();
413
414 /* If there were any forks/vforks that were caught and are now to be
415 followed, then do so now. */
416 switch (tp->pending_follow.kind)
417 {
418 case TARGET_WAITKIND_FORKED:
419 case TARGET_WAITKIND_VFORKED:
420 {
421 ptid_t parent, child;
422
423 /* If the user did a next/step, etc, over a fork call,
424 preserve the stepping state in the fork child. */
425 if (follow_child && should_resume)
426 {
427 step_resume_breakpoint
428 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
429 step_range_start = tp->step_range_start;
430 step_range_end = tp->step_range_end;
431 step_frame_id = tp->step_frame_id;
432
433 /* For now, delete the parent's sr breakpoint, otherwise,
434 parent/child sr breakpoints are considered duplicates,
435 and the child version will not be installed. Remove
436 this when the breakpoints module becomes aware of
437 inferiors and address spaces. */
438 delete_step_resume_breakpoint (tp);
439 tp->step_range_start = 0;
440 tp->step_range_end = 0;
441 tp->step_frame_id = null_frame_id;
442 }
443
444 parent = inferior_ptid;
445 child = tp->pending_follow.value.related_pid;
446
447 /* Tell the target to do whatever is necessary to follow
448 either parent or child. */
449 if (target_follow_fork (follow_child))
450 {
451 /* Target refused to follow, or there's some other reason
452 we shouldn't resume. */
453 should_resume = 0;
454 }
455 else
456 {
457 /* This pending follow fork event is now handled, one way
458 or another. The previously selected thread may be gone
459 from the lists by now, but if it is still around, we need
460 to clear the pending follow request. */
461 tp = find_thread_ptid (parent);
462 if (tp)
463 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
464
465 /* This makes sure we don't try to apply the "Switched
466 over from WAIT_PTID" logic above. */
467 nullify_last_target_wait_ptid ();
468
469 /* If we followed the child, switch to it... */
470 if (follow_child)
471 {
472 switch_to_thread (child);
473
474 /* ... and preserve the stepping state, in case the
475 user was stepping over the fork call. */
476 if (should_resume)
477 {
478 tp = inferior_thread ();
479 tp->step_resume_breakpoint = step_resume_breakpoint;
480 tp->step_range_start = step_range_start;
481 tp->step_range_end = step_range_end;
482 tp->step_frame_id = step_frame_id;
483 }
484 else
485 {
486 /* If we get here, it was because we're trying to
487 resume from a fork catchpoint, but the user
488 has switched threads away from the thread that
489 forked. In that case, the resume command
490 issued is most likely not applicable to the
491 child, so just warn, and refuse to resume. */
492 warning (_("\
493 Not resuming: switched threads before following fork child.\n"));
494 }
495
496 /* Reset breakpoints in the child as appropriate. */
497 follow_inferior_reset_breakpoints ();
498 }
499 else
500 switch_to_thread (parent);
501 }
502 }
503 break;
504 case TARGET_WAITKIND_SPURIOUS:
505 /* Nothing to follow. */
506 break;
507 default:
508 internal_error (__FILE__, __LINE__,
509 "Unexpected pending_follow.kind %d\n",
510 tp->pending_follow.kind);
511 break;
512 }
513
514 return should_resume;
515 }
516
517 void
518 follow_inferior_reset_breakpoints (void)
519 {
520 struct thread_info *tp = inferior_thread ();
521
522 /* Was there a step_resume breakpoint? (There was if the user
523 did a "next" at the fork() call.) If so, explicitly reset its
524 thread number.
525
526 step_resumes are a form of bp that are made to be per-thread.
527 Since we created the step_resume bp when the parent process
528 was being debugged, and now are switching to the child process,
529 from the breakpoint package's viewpoint, that's a switch of
530 "threads". We must update the bp's notion of which thread
531 it is for, or it'll be ignored when it triggers. */
532
533 if (tp->step_resume_breakpoint)
534 breakpoint_re_set_thread (tp->step_resume_breakpoint);
535
536 /* Reinsert all breakpoints in the child. The user may have set
537 breakpoints after catching the fork, in which case those
538 were never set in the child, but only in the parent. This makes
539 sure the inserted breakpoints match the breakpoint list. */
540
541 breakpoint_re_set ();
542 insert_breakpoints ();
543 }
544
545 /* The child has exited or execed: resume threads of the parent the
546 user wanted to be executing. */
547
548 static int
549 proceed_after_vfork_done (struct thread_info *thread,
550 void *arg)
551 {
552 int pid = * (int *) arg;
553
554 if (ptid_get_pid (thread->ptid) == pid
555 && is_running (thread->ptid)
556 && !is_executing (thread->ptid)
557 && !thread->stop_requested
558 && thread->stop_signal == TARGET_SIGNAL_0)
559 {
560 if (debug_infrun)
561 fprintf_unfiltered (gdb_stdlog,
562 "infrun: resuming vfork parent thread %s\n",
563 target_pid_to_str (thread->ptid));
564
565 switch_to_thread (thread->ptid);
566 clear_proceed_status ();
567 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
568 }
569
570 return 0;
571 }
572
573 /* Called whenever we notice an exec or exit event, to handle
574 detaching or resuming a vfork parent. */
575
576 static void
577 handle_vfork_child_exec_or_exit (int exec)
578 {
579 struct inferior *inf = current_inferior ();
580
581 if (inf->vfork_parent)
582 {
583 int resume_parent = -1;
584
585 /* This exec or exit marks the end of the shared memory region
586 between the parent and the child. If the user wanted to
587 detach from the parent, now is the time. */
588
589 if (inf->vfork_parent->pending_detach)
590 {
591 struct thread_info *tp;
592 struct cleanup *old_chain;
593 struct program_space *pspace;
594 struct address_space *aspace;
595
596 /* follow-fork child, detach-on-fork on */
597
598 old_chain = make_cleanup_restore_current_thread ();
599
600 /* We're letting go of the parent. */
601 tp = any_live_thread_of_process (inf->vfork_parent->pid);
602 switch_to_thread (tp->ptid);
603
604 /* We're about to detach from the parent, which implicitly
605 removes breakpoints from its address space. There's a
606 catch here: we want to reuse the spaces for the child,
607 but, parent/child are still sharing the pspace at this
608 point, although the exec in reality makes the kernel give
609 the child a fresh set of new pages. The problem here is
610 that the breakpoints module, being unaware of this, would
611 likely choose the child process to write to the parent
612 address space. Swapping the child temporarily away from
613 the spaces has the desired effect. Yes, this is "sort
614 of" a hack. */
615
616 pspace = inf->pspace;
617 aspace = inf->aspace;
618 inf->aspace = NULL;
619 inf->pspace = NULL;
620
621 if (debug_infrun || info_verbose)
622 {
623 target_terminal_ours ();
624
625 if (exec)
626 fprintf_filtered (gdb_stdlog,
627 "Detaching vfork parent process %d after child exec.\n",
628 inf->vfork_parent->pid);
629 else
630 fprintf_filtered (gdb_stdlog,
631 "Detaching vfork parent process %d after child exit.\n",
632 inf->vfork_parent->pid);
633 }
634
635 target_detach (NULL, 0);
636
637 /* Put it back. */
638 inf->pspace = pspace;
639 inf->aspace = aspace;
640
641 do_cleanups (old_chain);
642 }
643 else if (exec)
644 {
645 /* We're staying attached to the parent, so, really give the
646 child a new address space. */
647 inf->pspace = add_program_space (maybe_new_address_space ());
648 inf->aspace = inf->pspace->aspace;
649 inf->removable = 1;
650 set_current_program_space (inf->pspace);
651
652 resume_parent = inf->vfork_parent->pid;
653
654 /* Break the bonds. */
655 inf->vfork_parent->vfork_child = NULL;
656 }
657 else
658 {
659 struct cleanup *old_chain;
660 struct program_space *pspace;
661
662 /* If this is a vfork child exiting, then the pspace and
663 aspaces were shared with the parent. Since we're
664 reporting the process exit, we'll be mourning all that is
665 found in the address space, and switching to null_ptid,
666 preparing to start a new inferior. But, since we don't
667 want to clobber the parent's address/program spaces, we
668 go ahead and create a new one for this exiting
669 inferior. */
670
671 /* Switch to null_ptid, so that clone_program_space doesn't want
672 to read the selected frame of a dead process. */
673 old_chain = save_inferior_ptid ();
674 inferior_ptid = null_ptid;
675
676 /* This inferior is dead, so avoid giving the breakpoints
677 module the option to write through to it (cloning a
678 program space resets breakpoints). */
679 inf->aspace = NULL;
680 inf->pspace = NULL;
681 pspace = add_program_space (maybe_new_address_space ());
682 set_current_program_space (pspace);
683 inf->removable = 1;
684 clone_program_space (pspace, inf->vfork_parent->pspace);
685 inf->pspace = pspace;
686 inf->aspace = pspace->aspace;
687
688 /* Put back inferior_ptid. We'll continue mourning this
689 inferior. */
690 do_cleanups (old_chain);
691
692 resume_parent = inf->vfork_parent->pid;
693 /* Break the bonds. */
694 inf->vfork_parent->vfork_child = NULL;
695 }
696
697 inf->vfork_parent = NULL;
698
699 gdb_assert (current_program_space == inf->pspace);
700
701 if (non_stop && resume_parent != -1)
702 {
703 /* If the user wanted the parent to be running, let it go
704 free now. */
705 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
706
707 if (debug_infrun)
708 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
709 resume_parent);
710
711 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
712
713 do_cleanups (old_chain);
714 }
715 }
716 }
717
718 /* Enum strings for "set|show follow-exec-mode". */
719
720 static const char follow_exec_mode_new[] = "new";
721 static const char follow_exec_mode_same[] = "same";
722 static const char *follow_exec_mode_names[] =
723 {
724 follow_exec_mode_new,
725 follow_exec_mode_same,
726 NULL,
727 };
728
729 static const char *follow_exec_mode_string = follow_exec_mode_same;
730 static void
731 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
732 struct cmd_list_element *c, const char *value)
733 {
734 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
735 }
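/* Illustrative usage (the command name is inferred from the enum
   strings above; its registration is not part of this excerpt):

       (gdb) set follow-exec-mode new    -- keep the pre-exec inferior and
                                            create a fresh one for the exec
       (gdb) set follow-exec-mode same   -- reuse the same inferior (default)

   follow_exec below honors this choice when an exec event is reported.  */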
736
737 /* EXECD_PATHNAME is assumed to be non-NULL. */
738
739 static void
740 follow_exec (ptid_t pid, char *execd_pathname)
741 {
742 struct thread_info *th = inferior_thread ();
743 struct inferior *inf = current_inferior ();
744
745 /* This is an exec event that we actually wish to pay attention to.
746 Refresh our symbol table to the newly exec'd program, remove any
747 momentary bp's, etc.
748
749 If there are breakpoints, they aren't really inserted now,
750 since the exec() transformed our inferior into a fresh set
751 of instructions.
752
753 We want to preserve symbolic breakpoints on the list, since
754 we have hopes that they can be reset after the new a.out's
755 symbol table is read.
756
757 However, any "raw" breakpoints must be removed from the list
758 (e.g., the solib bp's), since their address is probably invalid
759 now.
760
761 And, we DON'T want to call delete_breakpoints() here, since
762 that may write the bp's "shadow contents" (the instruction
763 value that was overwritten with a TRAP instruction). Since
764 we now have a new a.out, those shadow contents aren't valid. */
765
766 mark_breakpoints_out ();
767
768 update_breakpoints_after_exec ();
769
770 /* If there was one, it's gone now. We cannot truly step-to-next
771 statement through an exec(). */
772 th->step_resume_breakpoint = NULL;
773 th->step_range_start = 0;
774 th->step_range_end = 0;
775
776 /* The target reports the exec event to the main thread, even if
777 some other thread does the exec, and even if the main thread was
778 already stopped --- if debugging in non-stop mode, it's possible
779 the user had the main thread held stopped in the previous image
780 --- release it now. This is the same behavior as step-over-exec
781 with scheduler-locking on in all-stop mode. */
782 th->stop_requested = 0;
783
784 /* What is this a.out's name? */
785 printf_unfiltered (_("%s is executing new program: %s\n"),
786 target_pid_to_str (inferior_ptid),
787 execd_pathname);
788
789 /* We've followed the inferior through an exec. Therefore, the
790 inferior has essentially been killed & reborn. */
791
792 gdb_flush (gdb_stdout);
793
794 breakpoint_init_inferior (inf_execd);
795
796 if (gdb_sysroot && *gdb_sysroot)
797 {
798 char *name = alloca (strlen (gdb_sysroot)
799 + strlen (execd_pathname)
800 + 1);
801
802 strcpy (name, gdb_sysroot);
803 strcat (name, execd_pathname);
804 execd_pathname = name;
805 }
806
807 /* Reset the shared library package. This ensures that we get a
808 shlib event when the child reaches "_start", at which point the
809 dld will have had a chance to initialize the child. */
810 /* Also, loading a symbol file below may trigger symbol lookups, and
811 we don't want those to be satisfied by the libraries of the
812 previous incarnation of this process. */
813 no_shared_libraries (NULL, 0);
814
815 if (follow_exec_mode_string == follow_exec_mode_new)
816 {
817 struct program_space *pspace;
818
819 /* The user wants to keep the old inferior and program spaces
820 around. Create a new fresh one, and switch to it. */
821
822 inf = add_inferior (current_inferior ()->pid);
823 pspace = add_program_space (maybe_new_address_space ());
824 inf->pspace = pspace;
825 inf->aspace = pspace->aspace;
826
827 exit_inferior_num_silent (current_inferior ()->num);
828
829 set_current_inferior (inf);
830 set_current_program_space (pspace);
831 }
832
833 gdb_assert (current_program_space == inf->pspace);
834
835 /* That a.out is now the one to use. */
836 exec_file_attach (execd_pathname, 0);
837
838 /* Load the main file's symbols. */
839 symbol_file_add_main (execd_pathname, 0);
840
841 #ifdef SOLIB_CREATE_INFERIOR_HOOK
842 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
843 #else
844 solib_create_inferior_hook (0);
845 #endif
846
847 jit_inferior_created_hook ();
848
849 /* Reinsert all breakpoints. (Those which were symbolic have
850 been reset to the proper address in the new a.out, thanks
851 to symbol_file_command...) */
852 insert_breakpoints ();
853
854 /* The next resume of this inferior should bring it to the shlib
855 startup breakpoints. (If the user had also set bp's on
856 "main" from the old (parent) process, then they'll auto-
857 matically get reset there in the new process.) */
858 }
859
860 /* Non-zero if we are just simulating a single-step. This is needed
861 because we cannot remove the breakpoints in the inferior process
862 until after the `wait' in `wait_for_inferior'. */
863 static int singlestep_breakpoints_inserted_p = 0;
864
865 /* The thread we inserted single-step breakpoints for. */
866 static ptid_t singlestep_ptid;
867
868 /* PC when we started this single-step. */
869 static CORE_ADDR singlestep_pc;
870
871 /* If another thread hit the singlestep breakpoint, we save the original
872 thread here so that we can resume single-stepping it later. */
873 static ptid_t saved_singlestep_ptid;
874 static int stepping_past_singlestep_breakpoint;
875
876 /* If not equal to null_ptid, this means that after the step over a
877 breakpoint is finished, we need to switch to deferred_step_ptid, and step it.
878
879 The use case is when one thread has hit a breakpoint, and then the user
880 has switched to another thread and issued 'step'. We need to step over
881 the breakpoint in the thread which hit the breakpoint, but then continue
882 stepping the thread the user has selected. */
883 static ptid_t deferred_step_ptid;
884 \f
885 /* Displaced stepping. */
886
887 /* In non-stop debugging mode, we must take special care to manage
888 breakpoints properly; in particular, the traditional strategy for
889 stepping a thread past a breakpoint it has hit is unsuitable.
890 'Displaced stepping' is a tactic for stepping one thread past a
891 breakpoint it has hit while ensuring that other threads running
892 concurrently will hit the breakpoint as they should.
893
894 The traditional way to step a thread T off a breakpoint in a
895 multi-threaded program in all-stop mode is as follows:
896
897 a0) Initially, all threads are stopped, and breakpoints are not
898 inserted.
899 a1) We single-step T, leaving breakpoints uninserted.
900 a2) We insert breakpoints, and resume all threads.
901
902 In non-stop debugging, however, this strategy is unsuitable: we
903 don't want to have to stop all threads in the system in order to
904 continue or step T past a breakpoint. Instead, we use displaced
905 stepping:
906
907 n0) Initially, T is stopped, other threads are running, and
908 breakpoints are inserted.
909 n1) We copy the instruction "under" the breakpoint to a separate
910 location, outside the main code stream, making any adjustments
911 to the instruction, register, and memory state as directed by
912 T's architecture.
913 n2) We single-step T over the instruction at its new location.
914 n3) We adjust the resulting register and memory state as directed
915 by T's architecture. This includes resetting T's PC to point
916 back into the main instruction stream.
917 n4) We resume T.
918
919 This approach depends on the following gdbarch methods:
920
921 - gdbarch_max_insn_length and gdbarch_displaced_step_location
922 indicate where to copy the instruction, and how much space must
923 be reserved there. We use these in step n1.
924
925 - gdbarch_displaced_step_copy_insn copies an instruction to a new
926 address, and makes any necessary adjustments to the instruction,
927 register contents, and memory. We use this in step n1.
928
929 - gdbarch_displaced_step_fixup adjusts registers and memory after
930 we have successfully single-stepped the instruction, to yield the
931 same effect the instruction would have had if we had executed it
932 at its original address. We use this in step n3.
933
934 - gdbarch_displaced_step_free_closure provides cleanup.
935
936 The gdbarch_displaced_step_copy_insn and
937 gdbarch_displaced_step_fixup functions must be written so that
938 copying an instruction with gdbarch_displaced_step_copy_insn,
939 single-stepping across the copied instruction, and then applying
940 gdbarch_displaced_step_fixup should have the same effects on the
941 thread's memory and registers as stepping the instruction in place
942 would have. Exactly which responsibilities fall to the copy and
943 which fall to the fixup is up to the author of those functions.
944
945 See the comments in gdbarch.sh for details.
946
947 Note that displaced stepping and software single-step cannot
948 currently be used in combination, although with some care I think
949 they could be made to. Software single-step works by placing
950 breakpoints on all possible subsequent instructions; if the
951 displaced instruction is a PC-relative jump, those breakpoints
952 could fall in very strange places --- on pages that aren't
953 executable, or at addresses that are not proper instruction
954 boundaries. (We do generally let other threads run while we wait
955 to hit the software single-step breakpoint, and they might
956 encounter such a corrupted instruction.) One way to work around
957 this would be to have gdbarch_displaced_step_copy_insn fully
958 simulate the effect of PC-relative instructions (and return NULL)
959 on architectures that use software single-stepping.
960
961 In non-stop mode, we can have independent and simultaneous step
962 requests, so more than one thread may need to simultaneously step
963 over a breakpoint. The current implementation assumes there is
964 only one scratch space per process. In this case, we have to
965 serialize access to the scratch space. If thread A wants to step
966 over a breakpoint, but we are currently waiting for some other
967 thread to complete a displaced step, we leave thread A stopped and
968 place it in the displaced_step_request_queue. Whenever a displaced
969 step finishes, we pick the next thread in the queue and start a new
970 displaced step operation on it. See displaced_step_prepare and
971 displaced_step_fixup for details. */
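/* In terms of the code below, one displaced step of thread T roughly
   follows this (simplified) sequence:

     displaced_step_prepare (T)      -- save the scratch area, copy the
                                        instruction there, point T's PC
                                        at the copy
     target_resume (T, step, ...)    -- single-step the copied instruction
     displaced_step_fixup (T, sig)   -- restore the scratch area, fix up
                                        registers/PC, then start the next
                                        queued request, if any

   This is only a sketch of the control flow; the queueing, signal
   handling and software single-step interactions are handled in the
   functions themselves.  */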
972
973 struct displaced_step_request
974 {
975 ptid_t ptid;
976 struct displaced_step_request *next;
977 };
978
979 /* Per-inferior displaced stepping state. */
980 struct displaced_step_inferior_state
981 {
982 /* Pointer to next in linked list. */
983 struct displaced_step_inferior_state *next;
984
985 /* The process this displaced step state refers to. */
986 int pid;
987
988 /* A queue of pending displaced stepping requests. One entry per
989 thread that needs to do a displaced step. */
990 struct displaced_step_request *step_request_queue;
991
992 /* If this is not null_ptid, this is the thread carrying out a
993 displaced single-step in process PID. This thread's state will
994 require fixing up once it has completed its step. */
995 ptid_t step_ptid;
996
997 /* The architecture the thread had when we stepped it. */
998 struct gdbarch *step_gdbarch;
999
1000 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1001 for post-step cleanup. */
1002 struct displaced_step_closure *step_closure;
1003
1004 /* The address of the original instruction, and the copy we
1005 made. */
1006 CORE_ADDR step_original, step_copy;
1007
1008 /* Saved contents of copy area. */
1009 gdb_byte *step_saved_copy;
1010 };
1011
1012 /* The list of states of processes currently involved in displaced
1013 stepping. */
1014 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1015
1016 /* Get the displaced stepping state of process PID. */
1017
1018 static struct displaced_step_inferior_state *
1019 get_displaced_stepping_state (int pid)
1020 {
1021 struct displaced_step_inferior_state *state;
1022
1023 for (state = displaced_step_inferior_states;
1024 state != NULL;
1025 state = state->next)
1026 if (state->pid == pid)
1027 return state;
1028
1029 return NULL;
1030 }
1031
1032 /* Add a new displaced stepping state for process PID to the displaced
1033 stepping state list, or return a pointer to the existing entry,
1034 if one exists already. Never returns NULL. */
1035
1036 static struct displaced_step_inferior_state *
1037 add_displaced_stepping_state (int pid)
1038 {
1039 struct displaced_step_inferior_state *state;
1040
1041 for (state = displaced_step_inferior_states;
1042 state != NULL;
1043 state = state->next)
1044 if (state->pid == pid)
1045 return state;
1046
1047 state = xcalloc (1, sizeof (*state));
1048 state->pid = pid;
1049 state->next = displaced_step_inferior_states;
1050 displaced_step_inferior_states = state;
1051
1052 return state;
1053 }
1054
1055 /* Remove the displaced stepping state of process PID. */
1056
1057 static void
1058 remove_displaced_stepping_state (int pid)
1059 {
1060 struct displaced_step_inferior_state *it, **prev_next_p;
1061
1062 gdb_assert (pid != 0);
1063
1064 it = displaced_step_inferior_states;
1065 prev_next_p = &displaced_step_inferior_states;
1066 while (it)
1067 {
1068 if (it->pid == pid)
1069 {
1070 *prev_next_p = it->next;
1071 xfree (it);
1072 return;
1073 }
1074
1075 prev_next_p = &it->next;
1076 it = *prev_next_p;
1077 }
1078 }
1079
1080 static void
1081 infrun_inferior_exit (struct inferior *inf)
1082 {
1083 remove_displaced_stepping_state (inf->pid);
1084 }
1085
1086 /* Enum strings for "set|show displaced-stepping". */
1087
1088 static const char can_use_displaced_stepping_auto[] = "auto";
1089 static const char can_use_displaced_stepping_on[] = "on";
1090 static const char can_use_displaced_stepping_off[] = "off";
1091 static const char *can_use_displaced_stepping_enum[] =
1092 {
1093 can_use_displaced_stepping_auto,
1094 can_use_displaced_stepping_on,
1095 can_use_displaced_stepping_off,
1096 NULL,
1097 };
1098
1099 /* If ON, and the architecture supports it, GDB will use displaced
1100 stepping to step over breakpoints. If OFF, or if the architecture
1101 doesn't support it, GDB will instead use the traditional
1102 hold-and-step approach. If AUTO (which is the default), GDB will
1103 decide which technique to use to step over breakpoints depending on
1104 which of all-stop or non-stop mode is active --- displaced stepping
1105 in non-stop mode; hold-and-step in all-stop mode. */
1106
1107 static const char *can_use_displaced_stepping =
1108 can_use_displaced_stepping_auto;
1109
1110 static void
1111 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1112 struct cmd_list_element *c,
1113 const char *value)
1114 {
1115 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1116 fprintf_filtered (file, _("\
1117 Debugger's willingness to use displaced stepping to step over \
1118 breakpoints is %s (currently %s).\n"),
1119 value, non_stop ? "on" : "off");
1120 else
1121 fprintf_filtered (file, _("\
1122 Debugger's willingness to use displaced stepping to step over \
1123 breakpoints is %s.\n"), value);
1124 }
1125
1126 /* Return non-zero if displaced stepping can/should be used to step
1127 over breakpoints. */
1128
1129 static int
1130 use_displaced_stepping (struct gdbarch *gdbarch)
1131 {
1132 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1133 && non_stop)
1134 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1135 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1136 && !RECORD_IS_USED);
1137 }
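/* For example, with the default "auto" setting the predicate above is
   true only in non-stop mode (and then only if the architecture
   provides gdbarch_displaced_step_copy_insn and no record target is in
   use); with "on" it is true in both all-stop and non-stop modes,
   subject to the same architecture and record checks.  */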
1138
1139 /* Clean out any stray displaced stepping state. */
1140 static void
1141 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1142 {
1143 /* Indicate that there is no cleanup pending. */
1144 displaced->step_ptid = null_ptid;
1145
1146 if (displaced->step_closure)
1147 {
1148 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1149 displaced->step_closure);
1150 displaced->step_closure = NULL;
1151 }
1152 }
1153
1154 static void
1155 displaced_step_clear_cleanup (void *arg)
1156 {
1157 struct displaced_step_inferior_state *state = arg;
1158
1159 displaced_step_clear (state);
1160 }
1161
1162 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1163 void
1164 displaced_step_dump_bytes (struct ui_file *file,
1165 const gdb_byte *buf,
1166 size_t len)
1167 {
1168 int i;
1169
1170 for (i = 0; i < len; i++)
1171 fprintf_unfiltered (file, "%02x ", buf[i]);
1172 fputs_unfiltered ("\n", file);
1173 }
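/* For example, dumping the four bytes { 0xcc, 0x90, 0x90, 0x90 } with
   the function above writes "cc 90 90 90 " followed by a newline to
   FILE.  (The byte values are only an illustration.)  */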
1174
1175 /* Prepare to single-step, using displaced stepping.
1176
1177 Note that we cannot use displaced stepping when we have a signal to
1178 deliver. If we have a signal to deliver and an instruction to step
1179 over, then after the step, there will be no indication from the
1180 target whether the thread entered a signal handler or ignored the
1181 signal and stepped over the instruction successfully --- both cases
1182 result in a simple SIGTRAP. In the first case we mustn't do a
1183 fixup, and in the second case we must --- but we can't tell which.
1184 Comments in the code for 'random signals' in handle_inferior_event
1185 explain how we handle this case instead.
1186
1187 Returns 1 if preparing was successful -- this thread is going to be
1188 stepped now; or 0 if displaced stepping this thread got queued. */
1189 static int
1190 displaced_step_prepare (ptid_t ptid)
1191 {
1192 struct cleanup *old_cleanups, *ignore_cleanups;
1193 struct regcache *regcache = get_thread_regcache (ptid);
1194 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1195 CORE_ADDR original, copy;
1196 ULONGEST len;
1197 struct displaced_step_closure *closure;
1198 struct displaced_step_inferior_state *displaced;
1199
1200 /* We should never reach this function if the architecture does not
1201 support displaced stepping. */
1202 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1203
1204 /* We have to displaced step one thread at a time, as we only have
1205 access to a single scratch space per inferior. */
1206
1207 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1208
1209 if (!ptid_equal (displaced->step_ptid, null_ptid))
1210 {
1211 /* Already waiting for a displaced step to finish. Defer this
1212 request and place in queue. */
1213 struct displaced_step_request *req, *new_req;
1214
1215 if (debug_displaced)
1216 fprintf_unfiltered (gdb_stdlog,
1217 "displaced: deferring step of %s\n",
1218 target_pid_to_str (ptid));
1219
1220 new_req = xmalloc (sizeof (*new_req));
1221 new_req->ptid = ptid;
1222 new_req->next = NULL;
1223
1224 if (displaced->step_request_queue)
1225 {
1226 for (req = displaced->step_request_queue;
1227 req && req->next;
1228 req = req->next)
1229 ;
1230 req->next = new_req;
1231 }
1232 else
1233 displaced->step_request_queue = new_req;
1234
1235 return 0;
1236 }
1237 else
1238 {
1239 if (debug_displaced)
1240 fprintf_unfiltered (gdb_stdlog,
1241 "displaced: stepping %s now\n",
1242 target_pid_to_str (ptid));
1243 }
1244
1245 displaced_step_clear (displaced);
1246
1247 old_cleanups = save_inferior_ptid ();
1248 inferior_ptid = ptid;
1249
1250 original = regcache_read_pc (regcache);
1251
1252 copy = gdbarch_displaced_step_location (gdbarch);
1253 len = gdbarch_max_insn_length (gdbarch);
1254
1255 /* Save the original contents of the copy area. */
1256 displaced->step_saved_copy = xmalloc (len);
1257 ignore_cleanups = make_cleanup (free_current_contents,
1258 &displaced->step_saved_copy);
1259 read_memory (copy, displaced->step_saved_copy, len);
1260 if (debug_displaced)
1261 {
1262 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1263 paddress (gdbarch, copy));
1264 displaced_step_dump_bytes (gdb_stdlog,
1265 displaced->step_saved_copy,
1266 len);
1267 }
1268
1269 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1270 original, copy, regcache);
1271
1272 /* We don't support the fully-simulated case at present. */
1273 gdb_assert (closure);
1274
1275 /* Save the information we need to fix things up if the step
1276 succeeds. */
1277 displaced->step_ptid = ptid;
1278 displaced->step_gdbarch = gdbarch;
1279 displaced->step_closure = closure;
1280 displaced->step_original = original;
1281 displaced->step_copy = copy;
1282
1283 make_cleanup (displaced_step_clear_cleanup, displaced);
1284
1285 /* Resume execution at the copy. */
1286 regcache_write_pc (regcache, copy);
1287
1288 discard_cleanups (ignore_cleanups);
1289
1290 do_cleanups (old_cleanups);
1291
1292 if (debug_displaced)
1293 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1294 paddress (gdbarch, copy));
1295
1296 return 1;
1297 }
1298
1299 static void
1300 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1301 {
1302 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1303
1304 inferior_ptid = ptid;
1305 write_memory (memaddr, myaddr, len);
1306 do_cleanups (ptid_cleanup);
1307 }
1308
1309 static void
1310 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1311 {
1312 struct cleanup *old_cleanups;
1313 struct displaced_step_inferior_state *displaced
1314 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1315
1316 /* Was any thread of this process doing a displaced step? */
1317 if (displaced == NULL)
1318 return;
1319
1320 /* Was this event for the pid we displaced? */
1321 if (ptid_equal (displaced->step_ptid, null_ptid)
1322 || ! ptid_equal (displaced->step_ptid, event_ptid))
1323 return;
1324
1325 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1326
1327 /* Restore the contents of the copy area. */
1328 {
1329 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1330
1331 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1332 displaced->step_saved_copy, len);
1333 if (debug_displaced)
1334 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1335 paddress (displaced->step_gdbarch,
1336 displaced->step_copy));
1337 }
1338
1339 /* Did the instruction complete successfully? */
1340 if (signal == TARGET_SIGNAL_TRAP)
1341 {
1342 /* Fix up the resulting state. */
1343 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1344 displaced->step_closure,
1345 displaced->step_original,
1346 displaced->step_copy,
1347 get_thread_regcache (displaced->step_ptid));
1348 }
1349 else
1350 {
1351 /* Since the instruction didn't complete, all we can do is
1352 relocate the PC. */
1353 struct regcache *regcache = get_thread_regcache (event_ptid);
1354 CORE_ADDR pc = regcache_read_pc (regcache);
1355
1356 pc = displaced->step_original + (pc - displaced->step_copy);
1357 regcache_write_pc (regcache, pc);
1358 }
1359
1360 do_cleanups (old_cleanups);
1361
1362 displaced->step_ptid = null_ptid;
1363
1364 /* Are there any pending displaced stepping requests? If so, run
1365 one now. Leave the state object around, since we're likely to
1366 need it again soon. */
1367 while (displaced->step_request_queue)
1368 {
1369 struct displaced_step_request *head;
1370 ptid_t ptid;
1371 struct regcache *regcache;
1372 struct gdbarch *gdbarch;
1373 CORE_ADDR actual_pc;
1374 struct address_space *aspace;
1375
1376 head = displaced->step_request_queue;
1377 ptid = head->ptid;
1378 displaced->step_request_queue = head->next;
1379 xfree (head);
1380
1381 context_switch (ptid);
1382
1383 regcache = get_thread_regcache (ptid);
1384 actual_pc = regcache_read_pc (regcache);
1385 aspace = get_regcache_aspace (regcache);
1386
1387 if (breakpoint_here_p (aspace, actual_pc))
1388 {
1389 if (debug_displaced)
1390 fprintf_unfiltered (gdb_stdlog,
1391 "displaced: stepping queued %s now\n",
1392 target_pid_to_str (ptid));
1393
1394 displaced_step_prepare (ptid);
1395
1396 gdbarch = get_regcache_arch (regcache);
1397
1398 if (debug_displaced)
1399 {
1400 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1401 gdb_byte buf[4];
1402
1403 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1404 paddress (gdbarch, actual_pc));
1405 read_memory (actual_pc, buf, sizeof (buf));
1406 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1407 }
1408
1409 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1410 displaced->step_closure))
1411 target_resume (ptid, 1, TARGET_SIGNAL_0);
1412 else
1413 target_resume (ptid, 0, TARGET_SIGNAL_0);
1414
1415 /* Done, we're stepping a thread. */
1416 break;
1417 }
1418 else
1419 {
1420 int step;
1421 struct thread_info *tp = inferior_thread ();
1422
1423 /* The breakpoint we were sitting under has since been
1424 removed. */
1425 tp->trap_expected = 0;
1426
1427 /* Go back to what we were trying to do. */
1428 step = currently_stepping (tp);
1429
1430 if (debug_displaced)
1431 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1432 target_pid_to_str (tp->ptid), step);
1433
1434 target_resume (ptid, step, TARGET_SIGNAL_0);
1435 tp->stop_signal = TARGET_SIGNAL_0;
1436
1437 /* This request was discarded. See if there's any other
1438 thread waiting for its turn. */
1439 }
1440 }
1441 }
1442
1443 /* Update global variables holding ptids to hold NEW_PTID if they were
1444 holding OLD_PTID. */
1445 static void
1446 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1447 {
1448 struct displaced_step_request *it;
1449 struct displaced_step_inferior_state *displaced;
1450
1451 if (ptid_equal (inferior_ptid, old_ptid))
1452 inferior_ptid = new_ptid;
1453
1454 if (ptid_equal (singlestep_ptid, old_ptid))
1455 singlestep_ptid = new_ptid;
1456
1457 if (ptid_equal (deferred_step_ptid, old_ptid))
1458 deferred_step_ptid = new_ptid;
1459
1460 for (displaced = displaced_step_inferior_states;
1461 displaced;
1462 displaced = displaced->next)
1463 {
1464 if (ptid_equal (displaced->step_ptid, old_ptid))
1465 displaced->step_ptid = new_ptid;
1466
1467 for (it = displaced->step_request_queue; it; it = it->next)
1468 if (ptid_equal (it->ptid, old_ptid))
1469 it->ptid = new_ptid;
1470 }
1471 }
1472
1473 \f
1474 /* Resuming. */
1475
1476 /* Things to clean up if we QUIT out of resume (). */
1477 static void
1478 resume_cleanups (void *ignore)
1479 {
1480 normal_stop ();
1481 }
1482
1483 static const char schedlock_off[] = "off";
1484 static const char schedlock_on[] = "on";
1485 static const char schedlock_step[] = "step";
1486 static const char *scheduler_enums[] = {
1487 schedlock_off,
1488 schedlock_on,
1489 schedlock_step,
1490 NULL
1491 };
1492 static const char *scheduler_mode = schedlock_off;
1493 static void
1494 show_scheduler_mode (struct ui_file *file, int from_tty,
1495 struct cmd_list_element *c, const char *value)
1496 {
1497 fprintf_filtered (file, _("\
1498 Mode for locking scheduler during execution is \"%s\".\n"),
1499 value);
1500 }
1501
1502 static void
1503 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1504 {
1505 if (!target_can_lock_scheduler)
1506 {
1507 scheduler_mode = schedlock_off;
1508 error (_("Target '%s' cannot support this command."), target_shortname);
1509 }
1510 }
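/* Illustrative usage of the scheduler setting above (the command name
   follows the enum strings; its registration is not part of this
   excerpt):

       (gdb) set scheduler-locking off    -- all threads may run (default)
       (gdb) set scheduler-locking on     -- only the current thread runs
       (gdb) set scheduler-locking step   -- other threads are held only
                                             while stepping

   resume () below consults scheduler_mode when choosing which threads
   to pass to target_resume.  */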
1511
1512 /* True if execution commands resume all threads of all processes by
1513 default; otherwise, resume only threads of the current inferior
1514 process. */
1515 int sched_multi = 0;
1516
1517 /* Try to set up software single-stepping over the specified location.
1518 Return 1 if target_resume() should use hardware single step.
1519
1520 GDBARCH the current gdbarch.
1521 PC the location to step over. */
1522
1523 static int
1524 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1525 {
1526 int hw_step = 1;
1527
1528 if (execution_direction == EXEC_FORWARD
1529 && gdbarch_software_single_step_p (gdbarch)
1530 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1531 {
1532 hw_step = 0;
1533 /* Do not pull these breakpoints until after a `wait' in
1534 `wait_for_inferior' */
1535 singlestep_breakpoints_inserted_p = 1;
1536 singlestep_ptid = inferior_ptid;
1537 singlestep_pc = pc;
1538 }
1539 return hw_step;
1540 }
1541
1542 /* Resume the inferior, but allow a QUIT. This is useful if the user
1543 wants to interrupt some lengthy single-stepping operation
1544 (for child processes, the SIGINT goes to the inferior, and so
1545 we get a SIGINT random_signal, but for remote debugging and perhaps
1546 other targets, that's not true).
1547
1548 STEP nonzero if we should step (zero to continue instead).
1549 SIG is the signal to give the inferior (zero for none). */
1550 void
1551 resume (int step, enum target_signal sig)
1552 {
1553 int should_resume = 1;
1554 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1555 struct regcache *regcache = get_current_regcache ();
1556 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1557 struct thread_info *tp = inferior_thread ();
1558 CORE_ADDR pc = regcache_read_pc (regcache);
1559 struct address_space *aspace = get_regcache_aspace (regcache);
1560
1561 QUIT;
1562
1563 /* Don't consider single-stepping when the inferior is
1564 waiting_for_vfork_done, either software or hardware step. With a
1565 software step, the child process would hit the software single-step
1566 breakpoint inserted in the parent process. With a hardware step, GDB
1567 can resume the inferior and wait for the vfork_done event. */
1568 if (current_inferior ()->waiting_for_vfork_done)
1569 {
1570 if (debug_infrun)
1571 fprintf_unfiltered (gdb_stdlog,
1572 "infrun: resume : clear step\n");
1573 step = 0;
1574 }
1575
1576 if (debug_infrun)
1577 fprintf_unfiltered (gdb_stdlog,
1578 "infrun: resume (step=%d, signal=%d), "
1579 "trap_expected=%d\n",
1580 step, sig, tp->trap_expected);
1581
1582 /* Normally, by the time we reach `resume', the breakpoints are either
1583 removed or inserted, as appropriate. The exception is if we're sitting
1584 at a permanent breakpoint; we need to step over it, but permanent
1585 breakpoints can't be removed. So we have to test for it here. */
1586 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1587 {
1588 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1589 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1590 else
1591 error (_("\
1592 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1593 how to step past a permanent breakpoint on this architecture. Try using\n\
1594 a command like `return' or `jump' to continue execution."));
1595 }
1596
1597 /* If enabled, step over breakpoints by executing a copy of the
1598 instruction at a different address.
1599
1600 We can't use displaced stepping when we have a signal to deliver;
1601 the comments for displaced_step_prepare explain why. The
1602 comments in handle_inferior_event for dealing with 'random
1603 signals' explain what we do instead.
1604
1605 We can't use displaced stepping while we are waiting for a vfork_done
1606 event either; displaced stepping would break the vfork child in the
1607 same way a software single-step breakpoint would. */
1608 if (use_displaced_stepping (gdbarch)
1609 && (tp->trap_expected
1610 || (step && gdbarch_software_single_step_p (gdbarch)))
1611 && sig == TARGET_SIGNAL_0
1612 && !current_inferior ()->waiting_for_vfork_done)
1613 {
1614 struct displaced_step_inferior_state *displaced;
1615
1616 if (!displaced_step_prepare (inferior_ptid))
1617 {
1618 /* Got placed in displaced stepping queue. Will be resumed
1619 later when all the currently queued displaced stepping
1620 requests finish. The thread is not executing at this point,
1621 and the call to set_executing will be made later. But we
1622 need to call set_running here, since from frontend point of view,
1623 the thread is running. */
1624 set_running (inferior_ptid, 1);
1625 discard_cleanups (old_cleanups);
1626 return;
1627 }
1628
1629 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1630 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1631 displaced->step_closure);
1632 }
1633
1634 /* Do we need to do it the hard way, w/temp breakpoints? */
1635 else if (step)
1636 step = maybe_software_singlestep (gdbarch, pc);
1637
1638 if (should_resume)
1639 {
1640 ptid_t resume_ptid;
1641
1642 /* If STEP is set, it's a request to use hardware stepping
1643 facilities. But in that case, we should never
1644 use a singlestep breakpoint. */
1645 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1646
1647 /* Decide the set of threads to ask the target to resume. Start
1648 by assuming everything will be resumed, then narrow the set
1649 by applying increasingly restrictive conditions. */
1650
1651 /* By default, resume all threads of all processes. */
1652 resume_ptid = RESUME_ALL;
1653
1654 /* Maybe resume only all threads of the current process. */
1655 if (!sched_multi && target_supports_multi_process ())
1656 {
1657 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1658 }
1659
1660 /* Maybe resume a single thread after all. */
1661 if (singlestep_breakpoints_inserted_p
1662 && stepping_past_singlestep_breakpoint)
1663 {
1664 /* The situation here is as follows. In thread T1 we wanted to
1665 single-step. Lacking hardware single-stepping, we've
1666 set a breakpoint at the PC of the next instruction -- call it
1667 P. After resuming, we've hit that breakpoint in thread T2.
1668 Now we've removed the original breakpoint, inserted a breakpoint
1669 at P+1, and are trying to step to advance T2 past the breakpoint.
1670 We need to step only T2: if T1 is allowed to run freely,
1671 it can run past P, and if other threads are allowed to run,
1672 they can hit the breakpoint at P+1, and nested hits of single-step
1673 breakpoints are not something we'd want -- that's complicated
1674 to support, and has no value. */
1675 resume_ptid = inferior_ptid;
1676 }
1677 else if ((step || singlestep_breakpoints_inserted_p)
1678 && tp->trap_expected)
1679 {
1680 /* We're allowing a thread to run past a breakpoint it has
1681 hit, by single-stepping the thread with the breakpoint
1682 removed. In which case, we need to single-step only this
1683 thread, and keep others stopped, as they can miss this
1684 breakpoint if allowed to run.
1685
1686 The current code actually removes all breakpoints when
1687 doing this, not just the one being stepped over, so if we
1688 let other threads run, we can actually miss any
1689 breakpoint, not just the one at PC. */
1690 resume_ptid = inferior_ptid;
1691 }
1692 else if (non_stop)
1693 {
1694 /* With non-stop mode on, threads are always handled
1695 individually. */
1696 resume_ptid = inferior_ptid;
1697 }
1698 else if ((scheduler_mode == schedlock_on)
1699 || (scheduler_mode == schedlock_step
1700 && (step || singlestep_breakpoints_inserted_p)))
1701 {
1702 /* User-settable 'scheduler' mode requires solo thread resume. */
1703 resume_ptid = inferior_ptid;
1704 }
1705
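/* All of the resume_ptid narrowing above reduces to one decision:
   resume only the event thread whenever we are stepping a thread past
   a breakpoint (software single-step or trap_expected), running in
   non-stop mode, or honoring scheduler locking; otherwise resume all
   threads of the current process, or of every process when
   schedule-multiple is on or the target does not distinguish
   processes.  The condensed restatement below is purely illustrative
   (never compiled) and uses only the variables already in scope
   above.  */
#if 0
      {
	int solo_resume
	  = ((singlestep_breakpoints_inserted_p
	      && stepping_past_singlestep_breakpoint)
	     || ((step || singlestep_breakpoints_inserted_p)
		 && tp->trap_expected)
	     || non_stop
	     || scheduler_mode == schedlock_on
	     || (scheduler_mode == schedlock_step
		 && (step || singlestep_breakpoints_inserted_p)));

	resume_ptid = (solo_resume
		       ? inferior_ptid
		       : (!sched_multi && target_supports_multi_process ()
			  ? pid_to_ptid (ptid_get_pid (inferior_ptid))
			  : RESUME_ALL));
      }
#endif
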
1706 if (gdbarch_cannot_step_breakpoint (gdbarch))
1707 {
1708 /* Most targets can step a breakpoint instruction, thus
1709 executing it normally. But if this one cannot, just
1710 continue and we will hit it anyway. */
1711 if (step && breakpoint_inserted_here_p (aspace, pc))
1712 step = 0;
1713 }
1714
1715 if (debug_displaced
1716 && use_displaced_stepping (gdbarch)
1717 && tp->trap_expected)
1718 {
1719 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1720 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1721 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1722 gdb_byte buf[4];
1723
1724 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1725 paddress (resume_gdbarch, actual_pc));
1726 read_memory (actual_pc, buf, sizeof (buf));
1727 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1728 }
1729
1730 /* Install inferior's terminal modes. */
1731 target_terminal_inferior ();
1732
1733 /* Avoid confusing the next resume, if the next stop/resume
1734 happens to apply to another thread. */
1735 tp->stop_signal = TARGET_SIGNAL_0;
1736
1737 target_resume (resume_ptid, step, sig);
1738 }
1739
1740 discard_cleanups (old_cleanups);
1741 }
1742 \f
1743 /* Proceeding. */
1744
1745 /* Clear out all variables saying what to do when the inferior is continued.
1746 First do this, then set the ones you want, then call `proceed'. */
1747
1748 static void
1749 clear_proceed_status_thread (struct thread_info *tp)
1750 {
1751 if (debug_infrun)
1752 fprintf_unfiltered (gdb_stdlog,
1753 "infrun: clear_proceed_status_thread (%s)\n",
1754 target_pid_to_str (tp->ptid));
1755
1756 tp->trap_expected = 0;
1757 tp->step_range_start = 0;
1758 tp->step_range_end = 0;
1759 tp->step_frame_id = null_frame_id;
1760 tp->step_stack_frame_id = null_frame_id;
1761 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1762 tp->stop_requested = 0;
1763
1764 tp->stop_step = 0;
1765
1766 tp->proceed_to_finish = 0;
1767
1768 /* Discard any remaining commands or status from previous stop. */
1769 bpstat_clear (&tp->stop_bpstat);
1770 }
1771
1772 static int
1773 clear_proceed_status_callback (struct thread_info *tp, void *data)
1774 {
1775 if (is_exited (tp->ptid))
1776 return 0;
1777
1778 clear_proceed_status_thread (tp);
1779 return 0;
1780 }
1781
1782 void
1783 clear_proceed_status (void)
1784 {
1785 if (!non_stop)
1786 {
1787 /* In all-stop mode, delete the per-thread status of all
1788 threads; even if inferior_ptid is null_ptid, there may be
1789 threads on the list. E.g., we may be launching a new
1790 process, while selecting the executable. */
1791 iterate_over_threads (clear_proceed_status_callback, NULL);
1792 }
1793
1794 if (!ptid_equal (inferior_ptid, null_ptid))
1795 {
1796 struct inferior *inferior;
1797
1798 if (non_stop)
1799 {
1800 /* If in non-stop mode, only delete the per-thread status of
1801 the current thread. */
1802 clear_proceed_status_thread (inferior_thread ());
1803 }
1804
1805 inferior = current_inferior ();
1806 inferior->stop_soon = NO_STOP_QUIETLY;
1807 }
1808
1809 stop_after_trap = 0;
1810
1811 observer_notify_about_to_proceed ();
1812
1813 if (stop_registers)
1814 {
1815 regcache_xfree (stop_registers);
1816 stop_registers = NULL;
1817 }
1818 }
1819
1820 /* Check the current thread against the thread that reported the most recent
1821 event. If a step-over is required, return TRUE and switch the current
1822 thread to the thread that reported that event. Otherwise return FALSE.
1823
1824 This should be suitable for any targets that support threads. */
1825
1826 static int
1827 prepare_to_proceed (int step)
1828 {
1829 ptid_t wait_ptid;
1830 struct target_waitstatus wait_status;
1831 int schedlock_enabled;
1832
1833 /* With non-stop mode on, threads are always handled individually. */
1834 gdb_assert (! non_stop);
1835
1836 /* Get the last target status returned by target_wait(). */
1837 get_last_target_status (&wait_ptid, &wait_status);
1838
1839 /* Make sure we were stopped at a breakpoint. */
1840 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1841 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1842 && wait_status.value.sig != TARGET_SIGNAL_ILL
1843 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1844 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1845 {
1846 return 0;
1847 }
1848
1849 schedlock_enabled = (scheduler_mode == schedlock_on
1850 || (scheduler_mode == schedlock_step
1851 && step));
1852
1853 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1854 if (schedlock_enabled)
1855 return 0;
1856
1857 /* Don't switch over if we're about to resume some process other
1858 than WAIT_PTID's, and schedule-multiple is off. */
1859 if (!sched_multi
1860 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1861 return 0;
1862
1863 /* Switched over from WAIT_PTID. */
1864 if (!ptid_equal (wait_ptid, minus_one_ptid)
1865 && !ptid_equal (inferior_ptid, wait_ptid))
1866 {
1867 struct regcache *regcache = get_thread_regcache (wait_ptid);
1868
1869 if (breakpoint_here_p (get_regcache_aspace (regcache),
1870 regcache_read_pc (regcache)))
1871 {
1872 /* If stepping, remember current thread to switch back to. */
1873 if (step)
1874 deferred_step_ptid = inferior_ptid;
1875
1876 /* Switch back to the WAIT_PTID thread. */
1877 switch_to_thread (wait_ptid);
1878
1879 /* We return 1 to indicate that there is a breakpoint here,
1880 so we need to step over it before continuing to avoid
1881 hitting it straight away. */
1882 return 1;
1883 }
1884 }
1885
1886 return 0;
1887 }
1888
1889 /* Basic routine for continuing the program in various fashions.
1890
1891 ADDR is the address to resume at, or -1 for resume where stopped.
1892 SIGGNAL is the signal to give it, or 0 for none,
1893 or -1 for act according to how it stopped.
1894 STEP is nonzero if we should trap after one instruction;
1895 a STEP of -1 means return after that and print nothing.
1896 You should probably set various step_... variables
1897 before calling here, if you are stepping.
1898
1899 You should call clear_proceed_status before calling proceed. */
1900
1901 void
1902 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1903 {
1904 struct regcache *regcache;
1905 struct gdbarch *gdbarch;
1906 struct thread_info *tp;
1907 CORE_ADDR pc;
1908 struct address_space *aspace;
1909 int oneproc = 0;
1910
1911 /* If we're stopped at a fork/vfork, follow the branch set by the
1912 "set follow-fork-mode" command; otherwise, we'll just proceed
1913 resuming the current thread. */
1914 if (!follow_fork ())
1915 {
1916 /* The target for some reason decided not to resume. */
1917 normal_stop ();
1918 return;
1919 }
1920
1921 regcache = get_current_regcache ();
1922 gdbarch = get_regcache_arch (regcache);
1923 aspace = get_regcache_aspace (regcache);
1924 pc = regcache_read_pc (regcache);
1925
1926 if (step > 0)
1927 step_start_function = find_pc_function (pc);
1928 if (step < 0)
1929 stop_after_trap = 1;
1930
1931 if (addr == (CORE_ADDR) -1)
1932 {
1933 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1934 && execution_direction != EXEC_REVERSE)
1935 /* There is a breakpoint at the address we will resume at;
1936 step one instruction before inserting breakpoints so that
1937 we do not stop right away (and report a second hit at this
1938 breakpoint).
1939
1940 Note, we don't do this in reverse, because we won't
1941 actually be executing the breakpoint insn anyway.
1942 We'll be (un-)executing the previous instruction. */
1943
1944 oneproc = 1;
1945 else if (gdbarch_single_step_through_delay_p (gdbarch)
1946 && gdbarch_single_step_through_delay (gdbarch,
1947 get_current_frame ()))
1948 /* We stepped onto an instruction that needs to be stepped
1949 again before re-inserting the breakpoint; do so. */
1950 oneproc = 1;
1951 }
1952 else
1953 {
1954 regcache_write_pc (regcache, addr);
1955 }
1956
1957 if (debug_infrun)
1958 fprintf_unfiltered (gdb_stdlog,
1959 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1960 paddress (gdbarch, addr), siggnal, step);
1961
1962 /* We're handling a live event, so make sure we're doing live
1963 debugging. If we're looking at traceframes while the target is
1964 running, we're going to need to get back to that mode after
1965 handling the event. */
1966 if (non_stop)
1967 {
1968 make_cleanup_restore_current_traceframe ();
1969 set_traceframe_number (-1);
1970 }
1971
1972 if (non_stop)
1973 /* In non-stop, each thread is handled individually. The context
1974 must already be set to the right thread here. */
1975 ;
1976 else
1977 {
1978 /* In a multi-threaded task we may select another thread and
1979 then continue or step.
1980
1981 But if the old thread was stopped at a breakpoint, it will
1982 immediately cause another breakpoint stop without any
1983 execution (i.e. it will report a breakpoint hit incorrectly).
1984 So we must step over it first.
1985
1986 prepare_to_proceed checks the current thread against the
1987 thread that reported the most recent event. If a step-over
1988 is required it returns TRUE and sets the current thread to
1989 the old thread. */
1990 if (prepare_to_proceed (step))
1991 oneproc = 1;
1992 }
1993
1994 /* prepare_to_proceed may change the current thread. */
1995 tp = inferior_thread ();
1996
1997 if (oneproc)
1998 {
1999 tp->trap_expected = 1;
2000 /* If displaced stepping is enabled, we can step over the
2001 breakpoint without hitting it, so leave all breakpoints
2002 inserted. Otherwise we need to disable all breakpoints, step
2003 one instruction, and then re-add them when that step is
2004 finished. */
2005 if (!use_displaced_stepping (gdbarch))
2006 remove_breakpoints ();
2007 }
2008
2009 /* We can insert breakpoints if we're not trying to step over one,
2010 or if we are stepping over one but we're using displaced stepping
2011 to do so. */
2012 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
2013 insert_breakpoints ();
2014
2015 if (!non_stop)
2016 {
2017 /* Pass the last stop signal to the thread we're resuming,
2018 irrespective of whether the current thread is the thread that
2019 got the last event or not. This was historically GDB's
2020 behaviour before keeping a stop_signal per thread. */
2021
2022 struct thread_info *last_thread;
2023 ptid_t last_ptid;
2024 struct target_waitstatus last_status;
2025
2026 get_last_target_status (&last_ptid, &last_status);
2027 if (!ptid_equal (inferior_ptid, last_ptid)
2028 && !ptid_equal (last_ptid, null_ptid)
2029 && !ptid_equal (last_ptid, minus_one_ptid))
2030 {
2031 last_thread = find_thread_ptid (last_ptid);
2032 if (last_thread)
2033 {
2034 tp->stop_signal = last_thread->stop_signal;
2035 last_thread->stop_signal = TARGET_SIGNAL_0;
2036 }
2037 }
2038 }
2039
2040 if (siggnal != TARGET_SIGNAL_DEFAULT)
2041 tp->stop_signal = siggnal;
2042 /* If this signal should not be seen by the program,
2043 give it zero. Used for debugging signals. */
2044 else if (!signal_program[tp->stop_signal])
2045 tp->stop_signal = TARGET_SIGNAL_0;
2046
2047 annotate_starting ();
2048
2049 /* Make sure that output from GDB appears before output from the
2050 inferior. */
2051 gdb_flush (gdb_stdout);
2052
2053 /* Refresh prev_pc value just prior to resuming. This used to be
2054 done in stop_stepping; however, setting prev_pc there did not handle
2055 scenarios such as inferior function calls or returning from
2056 a function via the return command. In those cases, the prev_pc
2057 value was not set properly for subsequent commands. The prev_pc value
2058 is used to initialize the starting line number in the ecs. With an
2059 invalid value, the gdb next command ends up stopping at the position
2060 represented by the next line table entry past our start position.
2061 On platforms that generate one line table entry per line, this
2062 is not a problem. However, on the ia64, the compiler generates
2063 extraneous line table entries that do not increase the line number.
2064 When we issue the gdb next command on the ia64 after an inferior call
2065 or a return command, we often end up a few instructions forward, still
2066 within the original line we started in.
2067
2068 An attempt was made to refresh the prev_pc at the same time the
2069 execution_control_state is initialized (for instance, just before
2070 waiting for an inferior event). But this approach did not work
2071 because of platforms that use ptrace, where the pc register cannot
2072 be read unless the inferior is stopped. At that point, we are not
2073 guaranteed the inferior is stopped and so the regcache_read_pc() call
2074 can fail. Setting the prev_pc value here ensures the value is updated
2075 correctly when the inferior is stopped. */
2076 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2077
2078 /* Fill in with reasonable starting values. */
2079 init_thread_stepping_state (tp);
2080
2081 /* Reset to normal state. */
2082 init_infwait_state ();
2083
2084 /* Resume inferior. */
2085 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
2086
2087 /* Wait for it to stop (if not standalone)
2088 and in any case decode why it stopped, and act accordingly. */
2089 /* Do this only if we are not using the event loop, or if the target
2090 does not support asynchronous execution. */
2091 if (!target_can_async_p ())
2092 {
2093 wait_for_inferior (0);
2094 normal_stop ();
2095 }
2096 }
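
/* A minimal usage sketch of the contract documented above: reset the
   per-thread proceed state, set whatever step_... variables apply, and
   then resume from wherever the inferior stopped, leaving the signal
   choice to the per-thread state.  The wrapper name is hypothetical
   and the block is illustrative only (never compiled).  */
#if 0
static void
example_continue_from_stop (void)
{
  clear_proceed_status ();
  /* ...set step_range_start, step_range_end, etc. here if stepping...  */
  proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
}
#endif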
2097 \f
2098
2099 /* Start remote-debugging of a machine over a serial link. */
2100
2101 void
2102 start_remote (int from_tty)
2103 {
2104 struct inferior *inferior;
2105
2106 init_wait_for_inferior ();
2107 inferior = current_inferior ();
2108 inferior->stop_soon = STOP_QUIETLY_REMOTE;
2109
2110 /* Always go on waiting for the target, regardless of the mode. */
2111 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2112 indicate to wait_for_inferior that a target should timeout if
2113 nothing is returned (instead of just blocking). Because of this,
2114 targets expecting an immediate response need to, internally, set
2115 things up so that the target_wait() is forced to eventually
2116 timeout. */
2117 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2118 differentiate to its caller what the state of the target is after
2119 the initial open has been performed. Here we're assuming that
2120 the target has stopped. It should be possible to eventually have
2121 target_open() return to the caller an indication that the target
2122 is currently running and GDB state should be set to the same as
2123 for an async run. */
2124 wait_for_inferior (0);
2125
2126 /* Now that the inferior has stopped, do any bookkeeping like
2127 loading shared libraries. We want to do this before normal_stop,
2128 so that the displayed frame is up to date. */
2129 post_create_inferior (&current_target, from_tty);
2130
2131 normal_stop ();
2132 }
2133
2134 /* Initialize static vars when a new inferior begins. */
2135
2136 void
2137 init_wait_for_inferior (void)
2138 {
2139 /* These are meaningless until the first time through wait_for_inferior. */
2140
2141 breakpoint_init_inferior (inf_starting);
2142
2143 clear_proceed_status ();
2144
2145 stepping_past_singlestep_breakpoint = 0;
2146 deferred_step_ptid = null_ptid;
2147
2148 target_last_wait_ptid = minus_one_ptid;
2149
2150 previous_inferior_ptid = null_ptid;
2151 init_infwait_state ();
2152
2153 /* Discard any skipped inlined frames. */
2154 clear_inline_frame_state (minus_one_ptid);
2155 }
2156
2157 \f
2158 /* This enum encodes possible reasons for doing a target_wait, so that
2159 wfi can call target_wait in one place. (Ultimately the call will be
2160 moved out of the infinite loop entirely.) */
2161
2162 enum infwait_states
2163 {
2164 infwait_normal_state,
2165 infwait_thread_hop_state,
2166 infwait_step_watch_state,
2167 infwait_nonstep_watch_state
2168 };
2169
2170 /* The PTID we'll do a target_wait on. */
2171 ptid_t waiton_ptid;
2172
2173 /* Current inferior wait state. */
2174 enum infwait_states infwait_state;
2175
2176 /* Data to be passed around while handling an event. This data is
2177 discarded between events. */
2178 struct execution_control_state
2179 {
2180 ptid_t ptid;
2181 /* The thread that got the event, if this was a thread event; NULL
2182 otherwise. */
2183 struct thread_info *event_thread;
2184
2185 struct target_waitstatus ws;
2186 int random_signal;
2187 CORE_ADDR stop_func_start;
2188 CORE_ADDR stop_func_end;
2189 char *stop_func_name;
2190 int new_thread_event;
2191 int wait_some_more;
2192 };
2193
2194 static void handle_inferior_event (struct execution_control_state *ecs);
2195
2196 static void handle_step_into_function (struct gdbarch *gdbarch,
2197 struct execution_control_state *ecs);
2198 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2199 struct execution_control_state *ecs);
2200 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2201 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2202 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2203 struct symtab_and_line sr_sal,
2204 struct frame_id sr_id);
2205 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2206
2207 static void stop_stepping (struct execution_control_state *ecs);
2208 static void prepare_to_wait (struct execution_control_state *ecs);
2209 static void keep_going (struct execution_control_state *ecs);
2210
2211 /* Callback for iterate_over_threads. If the thread is stopped, but
2212 the user/frontend doesn't know about that yet, go through
2213 normal_stop, as if the thread had just stopped now. ARG points at
2214 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2215 ptid_is_pid(PTID) is true, applies to all threads of the process
2216 pointed at by PTID. Otherwise, apply only to the thread pointed at
2217 by PTID. */
2218
2219 static int
2220 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2221 {
2222 ptid_t ptid = * (ptid_t *) arg;
2223
2224 if ((ptid_equal (info->ptid, ptid)
2225 || ptid_equal (minus_one_ptid, ptid)
2226 || (ptid_is_pid (ptid)
2227 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2228 && is_running (info->ptid)
2229 && !is_executing (info->ptid))
2230 {
2231 struct cleanup *old_chain;
2232 struct execution_control_state ecss;
2233 struct execution_control_state *ecs = &ecss;
2234
2235 memset (ecs, 0, sizeof (*ecs));
2236
2237 old_chain = make_cleanup_restore_current_thread ();
2238
2239 switch_to_thread (info->ptid);
2240
2241 /* Go through handle_inferior_event/normal_stop, so we always
2242 have consistent output as if the stop event had been
2243 reported. */
2244 ecs->ptid = info->ptid;
2245 ecs->event_thread = find_thread_ptid (info->ptid);
2246 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2247 ecs->ws.value.sig = TARGET_SIGNAL_0;
2248
2249 handle_inferior_event (ecs);
2250
2251 if (!ecs->wait_some_more)
2252 {
2253 struct thread_info *tp;
2254
2255 normal_stop ();
2256
2257 /* Finish off the continuations. The continuations
2258 themselves are responsible for realising that the thread
2259 didn't finish what it was supposed to do. */
2260 tp = inferior_thread ();
2261 do_all_intermediate_continuations_thread (tp);
2262 do_all_continuations_thread (tp);
2263 }
2264
2265 do_cleanups (old_chain);
2266 }
2267
2268 return 0;
2269 }
2270
2271 /* This function is attached as a "thread_stop_requested" observer.
2272 Cleanup local state that assumed the PTID was to be resumed, and
2273 report the stop to the frontend. */
2274
2275 static void
2276 infrun_thread_stop_requested (ptid_t ptid)
2277 {
2278 struct displaced_step_inferior_state *displaced;
2279
2280 /* PTID was requested to stop. Remove it from the displaced
2281 stepping queue, so we don't try to resume it automatically. */
2282
2283 for (displaced = displaced_step_inferior_states;
2284 displaced;
2285 displaced = displaced->next)
2286 {
2287 struct displaced_step_request *it, **prev_next_p;
2288
2289 it = displaced->step_request_queue;
2290 prev_next_p = &displaced->step_request_queue;
2291 while (it)
2292 {
2293 if (ptid_match (it->ptid, ptid))
2294 {
2295 *prev_next_p = it->next;
2296 it->next = NULL;
2297 xfree (it);
2298 }
2299 else
2300 {
2301 prev_next_p = &it->next;
2302 }
2303
2304 it = *prev_next_p;
2305 }
2306 }
2307
2308 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2309 }
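
/* The queue pruning above uses the classic pointer-to-pointer idiom:
   PREV_NEXT_P always designates the link through which IT was reached,
   so unlinking a node needs no special case for the head of the list.
   Below is a self-contained sketch of the same idiom on a hypothetical
   singly-linked list (struct node, remove_matching and KEY are made-up
   names; assumes <stdlib.h> for free).  Illustrative only, never
   compiled.  */
#if 0
struct node { int key; struct node *next; };

static void
remove_matching (struct node **head, int key)
{
  struct node *it, **prev_next_p = head;

  while ((it = *prev_next_p) != NULL)
    {
      if (it->key == key)
	{
	  /* Unlink IT; PREV_NEXT_P now reaches IT's successor, which
	     the next iteration picks up.  */
	  *prev_next_p = it->next;
	  free (it);
	}
      else
	prev_next_p = &it->next;	/* Keep IT; advance the link.  */
    }
}
#endif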
2310
2311 static void
2312 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2313 {
2314 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2315 nullify_last_target_wait_ptid ();
2316 }
2317
2318 /* Callback for iterate_over_threads. */
2319
2320 static int
2321 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2322 {
2323 if (is_exited (info->ptid))
2324 return 0;
2325
2326 delete_step_resume_breakpoint (info);
2327 return 0;
2328 }
2329
2330 /* In all-stop, delete the step resume breakpoint of any thread that
2331 had one. In non-stop, delete the step resume breakpoint of the
2332 thread that just stopped. */
2333
2334 static void
2335 delete_step_thread_step_resume_breakpoint (void)
2336 {
2337 if (!target_has_execution
2338 || ptid_equal (inferior_ptid, null_ptid))
2339 /* If the inferior has exited, we have already deleted the step
2340 resume breakpoints out of GDB's lists. */
2341 return;
2342
2343 if (non_stop)
2344 {
2345 /* If in non-stop mode, only delete the step-resume or
2346 longjmp-resume breakpoint of the thread that just stopped
2347 stepping. */
2348 struct thread_info *tp = inferior_thread ();
2349
2350 delete_step_resume_breakpoint (tp);
2351 }
2352 else
2353 /* In all-stop mode, delete all step-resume and longjmp-resume
2354 breakpoints of any thread that had them. */
2355 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2356 }
2357
2358 /* A cleanup wrapper. */
2359
2360 static void
2361 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2362 {
2363 delete_step_thread_step_resume_breakpoint ();
2364 }
2365
2366 /* Pretty print the results of target_wait, for debugging purposes. */
2367
2368 static void
2369 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2370 const struct target_waitstatus *ws)
2371 {
2372 char *status_string = target_waitstatus_to_string (ws);
2373 struct ui_file *tmp_stream = mem_fileopen ();
2374 char *text;
2375
2376 /* The text is split over several lines because it was getting too long.
2377 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2378 output as a unit; we want only one timestamp printed if debug_timestamp
2379 is set. */
2380
2381 fprintf_unfiltered (tmp_stream,
2382 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2383 if (PIDGET (waiton_ptid) != -1)
2384 fprintf_unfiltered (tmp_stream,
2385 " [%s]", target_pid_to_str (waiton_ptid));
2386 fprintf_unfiltered (tmp_stream, ", status) =\n");
2387 fprintf_unfiltered (tmp_stream,
2388 "infrun: %d [%s],\n",
2389 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2390 fprintf_unfiltered (tmp_stream,
2391 "infrun: %s\n",
2392 status_string);
2393
2394 text = ui_file_xstrdup (tmp_stream, NULL);
2395
2396 /* This uses %s in part to handle %'s in the text, but also to avoid
2397 a gcc error: the format attribute requires a string literal. */
2398 fprintf_unfiltered (gdb_stdlog, "%s", text);
2399
2400 xfree (status_string);
2401 xfree (text);
2402 ui_file_delete (tmp_stream);
2403 }
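
/* The essence of the buffering above, restated with plain stdio
   standing in for GDB's ui_file: build the whole multi-line message
   first, then emit it with a single call, so a sink that stamps each
   call gets exactly one timestamp.  The function name, buffer size and
   stream are arbitrary; illustrative only, never compiled.  */
#if 0
static void
example_log_wait_result (int pid, const char *status)
{
  char text[256];

  snprintf (text, sizeof (text),
	    "infrun: target_wait (%d, status) =\n"
	    "infrun: %s\n",
	    pid, status);
  fprintf (stderr, "%s", text);	/* One call, hence one timestamp.  */
}
#endif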
2404
2405 /* Prepare and stabilize the inferior for detaching it. E.g.,
2406 detaching while a thread is displaced stepping is a recipe for
2407 crashing it, as nothing would readjust the PC out of the scratch
2408 pad. */
2409
2410 void
2411 prepare_for_detach (void)
2412 {
2413 struct inferior *inf = current_inferior ();
2414 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2415 struct cleanup *old_chain_1;
2416 struct displaced_step_inferior_state *displaced;
2417
2418 displaced = get_displaced_stepping_state (inf->pid);
2419
2420 /* Is any thread of this process displaced stepping? If not,
2421 there's nothing else to do. */
2422 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2423 return;
2424
2425 if (debug_infrun)
2426 fprintf_unfiltered (gdb_stdlog,
2427 "displaced-stepping in-process while detaching");
2428
2429 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2430 inf->detaching = 1;
2431
2432 while (!ptid_equal (displaced->step_ptid, null_ptid))
2433 {
2434 struct cleanup *old_chain_2;
2435 struct execution_control_state ecss;
2436 struct execution_control_state *ecs;
2437
2438 ecs = &ecss;
2439 memset (ecs, 0, sizeof (*ecs));
2440
2441 overlay_cache_invalid = 1;
2442
2443 /* We have to invalidate the registers BEFORE calling
2444 target_wait because they can be loaded from the target while
2445 in target_wait. This makes remote debugging a bit more
2446 efficient for those targets that provide critical registers
2447 as part of their normal status mechanism. */
2448
2449 registers_changed ();
2450
2451 if (deprecated_target_wait_hook)
2452 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2453 else
2454 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2455
2456 if (debug_infrun)
2457 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2458
2459 /* If an error happens while handling the event, propagate GDB's
2460 knowledge of the executing state to the frontend/user running
2461 state. */
2462 old_chain_2 = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2463
2464 /* In non-stop mode, each thread is handled individually.
2465 Switch early, so the global state is set correctly for this
2466 thread. */
2467 if (non_stop
2468 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2469 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2470 context_switch (ecs->ptid);
2471
2472 /* Now figure out what to do with the result of the wait. */
2473 handle_inferior_event (ecs);
2474
2475 /* No error, don't finish the state yet. */
2476 discard_cleanups (old_chain_2);
2477
2478 /* Breakpoints and watchpoints are not installed on the target
2479 at this point, and signals are passed directly to the
2480 inferior, so this must mean the process is gone. */
2481 if (!ecs->wait_some_more)
2482 {
2483 discard_cleanups (old_chain_1);
2484 error (_("Program exited while detaching"));
2485 }
2486 }
2487
2488 discard_cleanups (old_chain_1);
2489 }
2490
2491 /* Wait for control to return from inferior to debugger.
2492
2493 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2494 as if they were SIGTRAP signals. This can be useful during
2495 the startup sequence on some targets such as HP/UX, where
2496 we receive an EXEC event instead of the expected SIGTRAP.
2497
2498 If the inferior gets a signal, we may decide to start it up again
2499 instead of returning. That is why there is a loop in this function.
2500 When this function actually returns it means the inferior
2501 should be left stopped and GDB should read more commands. */
2502
2503 void
2504 wait_for_inferior (int treat_exec_as_sigtrap)
2505 {
2506 struct cleanup *old_cleanups;
2507 struct execution_control_state ecss;
2508 struct execution_control_state *ecs;
2509
2510 if (debug_infrun)
2511 fprintf_unfiltered
2512 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2513 treat_exec_as_sigtrap);
2514
2515 old_cleanups =
2516 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2517
2518 ecs = &ecss;
2519 memset (ecs, 0, sizeof (*ecs));
2520
2521 /* We'll update this if & when we switch to a new thread. */
2522 previous_inferior_ptid = inferior_ptid;
2523
2524 while (1)
2525 {
2526 struct cleanup *old_chain;
2527
2528 /* We have to invalidate the registers BEFORE calling target_wait
2529 because they can be loaded from the target while in target_wait.
2530 This makes remote debugging a bit more efficient for those
2531 targets that provide critical registers as part of their normal
2532 status mechanism. */
2533
2534 overlay_cache_invalid = 1;
2535 registers_changed ();
2536
2537 if (deprecated_target_wait_hook)
2538 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2539 else
2540 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2541
2542 if (debug_infrun)
2543 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2544
2545 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2546 {
2547 xfree (ecs->ws.value.execd_pathname);
2548 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2549 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2550 }
2551
2552 /* If an error happens while handling the event, propagate GDB's
2553 knowledge of the executing state to the frontend/user running
2554 state. */
2555 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2556
2557 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2558 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2559 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2560
2561 /* Now figure out what to do with the result of the wait. */
2562 handle_inferior_event (ecs);
2563
2564 /* No error, don't finish the state yet. */
2565 discard_cleanups (old_chain);
2566
2567 if (!ecs->wait_some_more)
2568 break;
2569 }
2570
2571 do_cleanups (old_cleanups);
2572 }
2573
2574 /* Asynchronous version of wait_for_inferior. It is called by the
2575 event loop whenever a change of state is detected on the file
2576 descriptor corresponding to the target. It can be called more than
2577 once to complete a single execution command. In such cases we need
2578 to keep the state in a global variable ECSS. If it is the last time
2579 that this function is called for a single execution command, then
2580 report to the user that the inferior has stopped, and do the
2581 necessary cleanups. */
2582
2583 void
2584 fetch_inferior_event (void *client_data)
2585 {
2586 struct execution_control_state ecss;
2587 struct execution_control_state *ecs = &ecss;
2588 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2589 struct cleanup *ts_old_chain;
2590 int was_sync = sync_execution;
2591
2592 memset (ecs, 0, sizeof (*ecs));
2593
2594 /* We'll update this if & when we switch to a new thread. */
2595 previous_inferior_ptid = inferior_ptid;
2596
2597 if (non_stop)
2598 /* In non-stop mode, the user/frontend should not notice a thread
2599 switch due to internal events. Make sure we revert to the
2600 user-selected thread and frame after handling the event and
2601 running any breakpoint commands. */
2602 make_cleanup_restore_current_thread ();
2603
2604 /* We have to invalidate the registers BEFORE calling target_wait
2605 because they can be loaded from the target while in target_wait.
2606 This makes remote debugging a bit more efficient for those
2607 targets that provide critical registers as part of their normal
2608 status mechanism. */
2609
2610 overlay_cache_invalid = 1;
2611 registers_changed ();
2612
2613 if (deprecated_target_wait_hook)
2614 ecs->ptid =
2615 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2616 else
2617 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2618
2619 if (debug_infrun)
2620 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2621
2622 if (non_stop
2623 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2624 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2625 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2626 /* In non-stop mode, each thread is handled individually. Switch
2627 early, so the global state is set correctly for this
2628 thread. */
2629 context_switch (ecs->ptid);
2630
2631 /* If an error happens while handling the event, propagate GDB's
2632 knowledge of the executing state to the frontend/user running
2633 state. */
2634 if (!non_stop)
2635 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2636 else
2637 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2638
2639 /* Now figure out what to do with the result of the wait. */
2640 handle_inferior_event (ecs);
2641
2642 if (!ecs->wait_some_more)
2643 {
2644 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2645
2646 delete_step_thread_step_resume_breakpoint ();
2647
2648 /* We may not find an inferior if this was a process exit. */
2649 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2650 normal_stop ();
2651
2652 if (target_has_execution
2653 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2654 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2655 && ecs->event_thread->step_multi
2656 && ecs->event_thread->stop_step)
2657 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2658 else
2659 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2660 }
2661
2662 /* No error, don't finish the thread states yet. */
2663 discard_cleanups (ts_old_chain);
2664
2665 /* Revert thread and frame. */
2666 do_cleanups (old_chain);
2667
2668 /* If the inferior was in sync execution mode, and now isn't,
2669 restore the prompt. */
2670 if (was_sync && !sync_execution)
2671 display_gdb_prompt (0);
2672 }
2673
2674 /* Record the frame and location we're currently stepping through. */
2675 void
2676 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2677 {
2678 struct thread_info *tp = inferior_thread ();
2679
2680 tp->step_frame_id = get_frame_id (frame);
2681 tp->step_stack_frame_id = get_stack_frame_id (frame);
2682
2683 tp->current_symtab = sal.symtab;
2684 tp->current_line = sal.line;
2685 }
2686
2687 /* Clear context switchable stepping state. */
2688
2689 void
2690 init_thread_stepping_state (struct thread_info *tss)
2691 {
2692 tss->stepping_over_breakpoint = 0;
2693 tss->step_after_step_resume_breakpoint = 0;
2694 tss->stepping_through_solib_after_catch = 0;
2695 tss->stepping_through_solib_catchpoints = NULL;
2696 }
2697
2698 /* Return the cached copy of the last pid/waitstatus returned by
2699 target_wait()/deprecated_target_wait_hook(). The data is actually
2700 cached by handle_inferior_event(), which gets called immediately
2701 after target_wait()/deprecated_target_wait_hook(). */
2702
2703 void
2704 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2705 {
2706 *ptidp = target_last_wait_ptid;
2707 *status = target_last_waitstatus;
2708 }
2709
2710 void
2711 nullify_last_target_wait_ptid (void)
2712 {
2713 target_last_wait_ptid = minus_one_ptid;
2714 }
2715
2716 /* Switch thread contexts. */
2717
2718 static void
2719 context_switch (ptid_t ptid)
2720 {
2721 if (debug_infrun)
2722 {
2723 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2724 target_pid_to_str (inferior_ptid));
2725 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2726 target_pid_to_str (ptid));
2727 }
2728
2729 switch_to_thread (ptid);
2730 }
2731
2732 static void
2733 adjust_pc_after_break (struct execution_control_state *ecs)
2734 {
2735 struct regcache *regcache;
2736 struct gdbarch *gdbarch;
2737 struct address_space *aspace;
2738 CORE_ADDR breakpoint_pc;
2739
2740 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2741 we aren't, just return.
2742
2743 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2744 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2745 implemented by software breakpoints should be handled through the normal
2746 breakpoint layer.
2747
2748 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2749 different signals (SIGILL or SIGEMT for instance), but it is less
2750 clear where the PC is pointing afterwards. It may not match
2751 gdbarch_decr_pc_after_break. I don't know any specific target that
2752 generates these signals at breakpoints (the code has been in GDB since at
2754 least 1992), so I cannot guess how to handle them here.
2754
2755 In earlier versions of GDB, a target with
2756 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2757 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2758 target with both of these set in GDB history, and it seems unlikely to be
2759 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2760
2761 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2762 return;
2763
2764 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2765 return;
2766
2767 /* In reverse execution, when a breakpoint is hit, the instruction
2768 under it has already been de-executed. The reported PC always
2769 points at the breakpoint address, so adjusting it further would
2770 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2771 architecture:
2772
2773 B1 0x08000000 : INSN1
2774 B2 0x08000001 : INSN2
2775 0x08000002 : INSN3
2776 PC -> 0x08000003 : INSN4
2777
2778 Say you're stopped at 0x08000003 as above. Reverse continuing
2779 from that point should hit B2 as below. Reading the PC when the
2780 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2781 been de-executed already.
2782
2783 B1 0x08000000 : INSN1
2784 B2 PC -> 0x08000001 : INSN2
2785 0x08000002 : INSN3
2786 0x08000003 : INSN4
2787
2788 We can't apply the same logic as for forward execution, because
2789 we would wrongly adjust the PC to 0x08000000, since there's a
2790 breakpoint at PC - 1. We'd then report a hit on B1, although
2791 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2792 behaviour. */
2793 if (execution_direction == EXEC_REVERSE)
2794 return;
2795
2796 /* If this target does not decrement the PC after breakpoints, then
2797 we have nothing to do. */
2798 regcache = get_thread_regcache (ecs->ptid);
2799 gdbarch = get_regcache_arch (regcache);
2800 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2801 return;
2802
2803 aspace = get_regcache_aspace (regcache);
2804
2805 /* Find the location where (if we've hit a breakpoint) the
2806 breakpoint would be. */
2807 breakpoint_pc = regcache_read_pc (regcache)
2808 - gdbarch_decr_pc_after_break (gdbarch);
2809
2810 /* Check whether there actually is a software breakpoint inserted at
2811 that location.
2812
2813 If in non-stop mode, a race condition is possible where we've
2814 removed a breakpoint, but stop events for that breakpoint were
2815 already queued and arrive later. To suppress those spurious
2816 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2817 and retire them after a number of stop events are reported. */
2818 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2819 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2820 {
2821 struct cleanup *old_cleanups = NULL;
2822
2823 if (RECORD_IS_USED)
2824 old_cleanups = record_gdb_operation_disable_set ();
2825
2826 /* When using hardware single-step, a SIGTRAP is reported for both
2827 a completed single-step and a software breakpoint. Need to
2828 differentiate between the two, as the latter needs adjusting
2829 but the former does not.
2830
2831 The SIGTRAP can be due to a completed hardware single-step only if
2832 - we didn't insert software single-step breakpoints
2833 - the thread to be examined is still the current thread
2834 - this thread is currently being stepped
2835
2836 If any of these events did not occur, we must have stopped due
2837 to hitting a software breakpoint, and have to back up to the
2838 breakpoint address.
2839
2840 As a special case, we could have hardware single-stepped a
2841 software breakpoint. In this case (prev_pc == breakpoint_pc),
2842 we also need to back up to the breakpoint address. */
2843
2844 if (singlestep_breakpoints_inserted_p
2845 || !ptid_equal (ecs->ptid, inferior_ptid)
2846 || !currently_stepping (ecs->event_thread)
2847 || ecs->event_thread->prev_pc == breakpoint_pc)
2848 regcache_write_pc (regcache, breakpoint_pc);
2849
2850 if (RECORD_IS_USED)
2851 do_cleanups (old_cleanups);
2852 }
2853 }
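
/* A worked example of the forward-execution adjustment above, assuming
   a target whose gdbarch_decr_pc_after_break is 1 (a one-byte
   breakpoint instruction that leaves the PC just past itself).  The
   addresses reuse the diagram in the comment above; the locals are
   made up for illustration and the block is never compiled.  */
#if 0
  CORE_ADDR reported_pc = 0x08000002;	/* PC as read after the SIGTRAP.  */
  CORE_ADDR decr_pc = 1;		/* gdbarch_decr_pc_after_break.  */
  CORE_ADDR breakpoint_pc = reported_pc - decr_pc;	/* 0x08000001 == B2.  */

  /* If a software breakpoint really is inserted at BREAKPOINT_PC, the
     PC is rewound to it so the stop is reported at the breakpoint
     address rather than one byte past it.  */
#endif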
2854
2855 void
2856 init_infwait_state (void)
2857 {
2858 waiton_ptid = pid_to_ptid (-1);
2859 infwait_state = infwait_normal_state;
2860 }
2861
2862 void
2863 error_is_running (void)
2864 {
2865 error (_("\
2866 Cannot execute this command while the selected thread is running."));
2867 }
2868
2869 void
2870 ensure_not_running (void)
2871 {
2872 if (is_running (inferior_ptid))
2873 error_is_running ();
2874 }
2875
2876 static int
2877 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2878 {
2879 for (frame = get_prev_frame (frame);
2880 frame != NULL;
2881 frame = get_prev_frame (frame))
2882 {
2883 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2884 return 1;
2885 if (get_frame_type (frame) != INLINE_FRAME)
2886 break;
2887 }
2888
2889 return 0;
2890 }
2891
2892 /* Auxiliary function that handles syscall entry/return events.
2893 It returns 1 if the inferior should keep going (and GDB
2894 should ignore the event), or 0 if the event deserves to be
2895 processed. */
2896
2897 static int
2898 handle_syscall_event (struct execution_control_state *ecs)
2899 {
2900 struct regcache *regcache;
2901 struct gdbarch *gdbarch;
2902 int syscall_number;
2903
2904 if (!ptid_equal (ecs->ptid, inferior_ptid))
2905 context_switch (ecs->ptid);
2906
2907 regcache = get_thread_regcache (ecs->ptid);
2908 gdbarch = get_regcache_arch (regcache);
2909 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2910 stop_pc = regcache_read_pc (regcache);
2911
2912 target_last_waitstatus.value.syscall_number = syscall_number;
2913
2914 if (catch_syscall_enabled () > 0
2915 && catching_syscall_number (syscall_number) > 0)
2916 {
2917 if (debug_infrun)
2918 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2919 syscall_number);
2920
2921 ecs->event_thread->stop_bpstat
2922 = bpstat_stop_status (get_regcache_aspace (regcache),
2923 stop_pc, ecs->ptid);
2924 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2925
2926 if (!ecs->random_signal)
2927 {
2928 /* Catchpoint hit. */
2929 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2930 return 0;
2931 }
2932 }
2933
2934 /* If no catchpoint triggered for this, then keep going. */
2935 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2936 keep_going (ecs);
2937 return 1;
2938 }
2939
2940 /* Given an execution control state that has been freshly filled in
2941 by an event from the inferior, figure out what it means and take
2942 appropriate action. */
2943
2944 static void
2945 handle_inferior_event (struct execution_control_state *ecs)
2946 {
2947 struct frame_info *frame;
2948 struct gdbarch *gdbarch;
2949 int sw_single_step_trap_p = 0;
2950 int stopped_by_watchpoint;
2951 int stepped_after_stopped_by_watchpoint = 0;
2952 struct symtab_and_line stop_pc_sal;
2953 enum stop_kind stop_soon;
2954
2955 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2956 {
2957 /* We had an event in the inferior, but we are not interested in
2958 handling it at this level. The lower layers have already
2959 done what needs to be done, if anything.
2960
2961 One of the possible circumstances for this is when the
2962 inferior produces output for the console. The inferior has
2963 not stopped, and we are ignoring the event. Another possible
2964 circumstance is any event which the lower level knows will be
2965 reported multiple times without an intervening resume. */
2966 if (debug_infrun)
2967 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2968 prepare_to_wait (ecs);
2969 return;
2970 }
2971
2972 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2973 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2974 {
2975 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2976
2977 gdb_assert (inf);
2978 stop_soon = inf->stop_soon;
2979 }
2980 else
2981 stop_soon = NO_STOP_QUIETLY;
2982
2983 /* Cache the last pid/waitstatus. */
2984 target_last_wait_ptid = ecs->ptid;
2985 target_last_waitstatus = ecs->ws;
2986
2987 /* Always clear state belonging to the previous time we stopped. */
2988 stop_stack_dummy = STOP_NONE;
2989
2990 /* If it's a new process, add it to the thread database. */
2991
2992 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2993 && !ptid_equal (ecs->ptid, minus_one_ptid)
2994 && !in_thread_list (ecs->ptid));
2995
2996 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2997 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2998 add_thread (ecs->ptid);
2999
3000 ecs->event_thread = find_thread_ptid (ecs->ptid);
3001
3002 /* Dependent on valid ECS->EVENT_THREAD. */
3003 adjust_pc_after_break (ecs);
3004
3005 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3006 reinit_frame_cache ();
3007
3008 breakpoint_retire_moribund ();
3009
3010 /* First, distinguish signals caused by the debugger from signals
3011 that have to do with the program's own actions. Note that
3012 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3013 on the operating system version. Here we detect when a SIGILL or
3014 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3015 something similar for SIGSEGV, since a SIGSEGV will be generated
3016 when we're trying to execute a breakpoint instruction on a
3017 non-executable stack. This happens for call dummy breakpoints
3018 for architectures like SPARC that place call dummies on the
3019 stack. */
3020 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3021 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3022 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3023 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3024 {
3025 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3026
3027 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3028 regcache_read_pc (regcache)))
3029 {
3030 if (debug_infrun)
3031 fprintf_unfiltered (gdb_stdlog,
3032 "infrun: Treating signal as SIGTRAP\n");
3033 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3034 }
3035 }
3036
3037 /* Mark the non-executing threads accordingly. In all-stop, all
3038 threads of all processes are stopped when we get any event
3039 reported. In non-stop mode, only the event thread stops. If
3040 we're handling a process exit in non-stop mode, there's nothing
3041 to do, as threads of the dead process are gone, and threads of
3042 any other process were left running. */
3043 if (!non_stop)
3044 set_executing (minus_one_ptid, 0);
3045 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3046 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3047 set_executing (inferior_ptid, 0);
3048
3049 switch (infwait_state)
3050 {
3051 case infwait_thread_hop_state:
3052 if (debug_infrun)
3053 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3054 break;
3055
3056 case infwait_normal_state:
3057 if (debug_infrun)
3058 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3059 break;
3060
3061 case infwait_step_watch_state:
3062 if (debug_infrun)
3063 fprintf_unfiltered (gdb_stdlog,
3064 "infrun: infwait_step_watch_state\n");
3065
3066 stepped_after_stopped_by_watchpoint = 1;
3067 break;
3068
3069 case infwait_nonstep_watch_state:
3070 if (debug_infrun)
3071 fprintf_unfiltered (gdb_stdlog,
3072 "infrun: infwait_nonstep_watch_state\n");
3073 insert_breakpoints ();
3074
3075 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3076 handle things like signals arriving and other things happening
3077 in combination correctly? */
3078 stepped_after_stopped_by_watchpoint = 1;
3079 break;
3080
3081 default:
3082 internal_error (__FILE__, __LINE__, _("bad switch"));
3083 }
3084
3085 infwait_state = infwait_normal_state;
3086 waiton_ptid = pid_to_ptid (-1);
3087
3088 switch (ecs->ws.kind)
3089 {
3090 case TARGET_WAITKIND_LOADED:
3091 if (debug_infrun)
3092 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3093 /* Ignore gracefully during startup of the inferior, as it might
3094 be the shell, which has just loaded some objects; otherwise,
3095 add the symbols for the newly loaded objects. Also ignore at
3096 the beginning of an attach or remote session; we will query
3097 the full list of libraries once the connection is
3098 established. */
3099 if (stop_soon == NO_STOP_QUIETLY)
3100 {
3101 /* Check for any newly added shared libraries if we're
3102 supposed to be adding them automatically. Switch
3103 terminal for any messages produced by
3104 breakpoint_re_set. */
3105 target_terminal_ours_for_output ();
3106 /* NOTE: cagney/2003-11-25: Make certain that the target
3107 stack's section table is kept up-to-date. Architectures,
3108 (e.g., PPC64), use the section table to perform
3109 operations such as address => section name and hence
3110 require the table to contain all sections (including
3111 those found in shared libraries). */
3112 #ifdef SOLIB_ADD
3113 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3114 #else
3115 solib_add (NULL, 0, &current_target, auto_solib_add);
3116 #endif
3117 target_terminal_inferior ();
3118
3119 /* If requested, stop when the dynamic linker notifies
3120 gdb of events. This allows the user to get control
3121 and place breakpoints in initializer routines for
3122 dynamically loaded objects (among other things). */
3123 if (stop_on_solib_events)
3124 {
3125 /* Make sure we print "Stopped due to solib-event" in
3126 normal_stop. */
3127 stop_print_frame = 1;
3128
3129 stop_stepping (ecs);
3130 return;
3131 }
3132
3133 /* NOTE drow/2007-05-11: This might be a good place to check
3134 for "catch load". */
3135 }
3136
3137 /* If we are skipping through a shell, or through shared library
3138 loading that we aren't interested in, resume the program. If
3139 we're running the program normally, also resume. But stop if
3140 we're attaching or setting up a remote connection. */
3141 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3142 {
3143 /* Loading of shared libraries might have changed breakpoint
3144 addresses. Make sure new breakpoints are inserted. */
3145 if (stop_soon == NO_STOP_QUIETLY
3146 && !breakpoints_always_inserted_mode ())
3147 insert_breakpoints ();
3148 resume (0, TARGET_SIGNAL_0);
3149 prepare_to_wait (ecs);
3150 return;
3151 }
3152
3153 break;
3154
3155 case TARGET_WAITKIND_SPURIOUS:
3156 if (debug_infrun)
3157 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3158 resume (0, TARGET_SIGNAL_0);
3159 prepare_to_wait (ecs);
3160 return;
3161
3162 case TARGET_WAITKIND_EXITED:
3163 if (debug_infrun)
3164 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3165 inferior_ptid = ecs->ptid;
3166 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3167 set_current_program_space (current_inferior ()->pspace);
3168 handle_vfork_child_exec_or_exit (0);
3169 target_terminal_ours (); /* Must do this before mourn anyway. */
3170 print_exited_reason (ecs->ws.value.integer);
3171
3172 /* Record the exit code in the convenience variable $_exitcode, so
3173 that the user can inspect this again later. */
3174 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3175 (LONGEST) ecs->ws.value.integer);
3176 gdb_flush (gdb_stdout);
3177 target_mourn_inferior ();
3178 singlestep_breakpoints_inserted_p = 0;
3179 cancel_single_step_breakpoints ();
3180 stop_print_frame = 0;
3181 stop_stepping (ecs);
3182 return;
3183
3184 case TARGET_WAITKIND_SIGNALLED:
3185 if (debug_infrun)
3186 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3187 inferior_ptid = ecs->ptid;
3188 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3189 set_current_program_space (current_inferior ()->pspace);
3190 handle_vfork_child_exec_or_exit (0);
3191 stop_print_frame = 0;
3192 target_terminal_ours (); /* Must do this before mourn anyway. */
3193
3194 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3195 reach here unless the inferior is dead. However, for years
3196 target_kill() was called here, which hints that fatal signals aren't
3197 really fatal on some systems. If that's true, then some changes
3198 may be needed. */
3199 target_mourn_inferior ();
3200
3201 print_signal_exited_reason (ecs->ws.value.sig);
3202 singlestep_breakpoints_inserted_p = 0;
3203 cancel_single_step_breakpoints ();
3204 stop_stepping (ecs);
3205 return;
3206
3207 /* The following are the only cases in which we keep going;
3208 the above cases end in a continue or goto. */
3209 case TARGET_WAITKIND_FORKED:
3210 case TARGET_WAITKIND_VFORKED:
3211 if (debug_infrun)
3212 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3213
3214 if (!ptid_equal (ecs->ptid, inferior_ptid))
3215 {
3216 context_switch (ecs->ptid);
3217 reinit_frame_cache ();
3218 }
3219
3220 /* Immediately detach breakpoints from the child before there's
3221 any chance of letting the user delete breakpoints from the
3222 breakpoint lists. If we don't do this early, it's easy to
3223 leave leftover traps in the child, viz: "break foo; catch
3224 fork; c; <fork>; del; c; <child calls foo>". We only follow
3225 the fork on the last `continue', and by that time the
3226 breakpoint at "foo" is long gone from the breakpoint table.
3227 If we vforked, then we don't need to unpatch here, since both
3228 parent and child are sharing the same memory pages; we'll
3229 need to unpatch at follow/detach time instead to be certain
3230 that new breakpoints added between catchpoint hit time and
3231 vfork follow are detached. */
3232 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3233 {
3234 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3235
3236 /* This won't actually modify the breakpoint list, but will
3237 physically remove the breakpoints from the child. */
3238 detach_breakpoints (child_pid);
3239 }
3240
3241 if (singlestep_breakpoints_inserted_p)
3242 {
3243 /* Pull the single step breakpoints out of the target. */
3244 remove_single_step_breakpoints ();
3245 singlestep_breakpoints_inserted_p = 0;
3246 }
3247
3248 /* In case the event is caught by a catchpoint, remember that
3249 the event is to be followed at the next resume of the thread,
3250 and not immediately. */
3251 ecs->event_thread->pending_follow = ecs->ws;
3252
3253 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3254
3255 ecs->event_thread->stop_bpstat
3256 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3257 stop_pc, ecs->ptid);
3258
3259 /* Note that we're interested in knowing whether the bpstat actually
3260 causes a stop, not just whether it may explain the signal.
3261 Software watchpoints, for example, always appear in the
3262 bpstat. */
3263 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
3264
3265 /* If no catchpoint triggered for this, then keep going. */
3266 if (ecs->random_signal)
3267 {
3268 ptid_t parent;
3269 ptid_t child;
3270 int should_resume;
3271 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
3272
3273 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3274
3275 should_resume = follow_fork ();
3276
3277 parent = ecs->ptid;
3278 child = ecs->ws.value.related_pid;
3279
3280 /* In non-stop mode, also resume the other branch. */
3281 if (non_stop && !detach_fork)
3282 {
3283 if (follow_child)
3284 switch_to_thread (parent);
3285 else
3286 switch_to_thread (child);
3287
3288 ecs->event_thread = inferior_thread ();
3289 ecs->ptid = inferior_ptid;
3290 keep_going (ecs);
3291 }
3292
3293 if (follow_child)
3294 switch_to_thread (child);
3295 else
3296 switch_to_thread (parent);
3297
3298 ecs->event_thread = inferior_thread ();
3299 ecs->ptid = inferior_ptid;
3300
3301 if (should_resume)
3302 keep_going (ecs);
3303 else
3304 stop_stepping (ecs);
3305 return;
3306 }
3307 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3308 goto process_event_stop_test;
3309
3310 case TARGET_WAITKIND_VFORK_DONE:
3311 /* Done with the shared memory region. Re-insert breakpoints in
3312 the parent, and keep going. */
3313
3314 if (debug_infrun)
3315 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3316
3317 if (!ptid_equal (ecs->ptid, inferior_ptid))
3318 context_switch (ecs->ptid);
3319
3320 current_inferior ()->waiting_for_vfork_done = 0;
3321 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3322 /* This also takes care of reinserting breakpoints in the
3323 previously locked inferior. */
3324 keep_going (ecs);
3325 return;
3326
3327 case TARGET_WAITKIND_EXECD:
3328 if (debug_infrun)
3329 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3330
3331 if (!ptid_equal (ecs->ptid, inferior_ptid))
3332 {
3333 context_switch (ecs->ptid);
3334 reinit_frame_cache ();
3335 }
3336
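      /* The exec replaced the program image, so any software
	 single-step breakpoints we had planted are no longer
	 meaningful; drop our record of them rather than trying to
	 remove them from the old image.  */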
3337 singlestep_breakpoints_inserted_p = 0;
3338 cancel_single_step_breakpoints ();
3339
3340 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3341
3342 /* Do whatever is necessary to the parent branch of the vfork. */
3343 handle_vfork_child_exec_or_exit (1);
3344
3345 /* This causes the eventpoints and symbol table to be reset.
3346 Must do this now, before trying to determine whether to
3347 stop. */
3348 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3349
3350 ecs->event_thread->stop_bpstat
3351 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3352 stop_pc, ecs->ptid);
3353 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3354
3355 /* Note that this may be referenced from inside
3356 bpstat_stop_status above, through inferior_has_execd. */
3357 xfree (ecs->ws.value.execd_pathname);
3358 ecs->ws.value.execd_pathname = NULL;
3359
3360 /* If no catchpoint triggered for this, then keep going. */
3361 if (ecs->random_signal)
3362 {
3363 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3364 keep_going (ecs);
3365 return;
3366 }
3367 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3368 goto process_event_stop_test;
3369
3370 /* Be careful not to try to gather much state about a thread
3371 that's in a syscall. It's frequently a losing proposition. */
3372 case TARGET_WAITKIND_SYSCALL_ENTRY:
3373 if (debug_infrun)
3374 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3375       /* Get the current syscall number.  */
3376 if (handle_syscall_event (ecs) != 0)
3377 return;
3378 goto process_event_stop_test;
3379
3380 /* Before examining the threads further, step this thread to
3381 get it entirely out of the syscall. (We get notice of the
3382 event when the thread is just on the verge of exiting a
3383 syscall. Stepping one instruction seems to get it back
3384 into user code.) */
3385 case TARGET_WAITKIND_SYSCALL_RETURN:
3386 if (debug_infrun)
3387 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3388 if (handle_syscall_event (ecs) != 0)
3389 return;
3390 goto process_event_stop_test;
3391
3392 case TARGET_WAITKIND_STOPPED:
3393 if (debug_infrun)
3394 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3395 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3396 break;
3397
3398 case TARGET_WAITKIND_NO_HISTORY:
3399 /* Reverse execution: target ran out of history info. */
3400 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3401 print_no_history_reason ();
3402 stop_stepping (ecs);
3403 return;
3404 }
3405
3406 if (ecs->new_thread_event)
3407 {
3408 if (non_stop)
3409 /* Non-stop assumes that the target handles adding new threads
3410 to the thread list. */
3411 internal_error (__FILE__, __LINE__, "\
3412 targets should add new threads to the thread list themselves in non-stop mode.");
3413
3414 /* We may want to consider not doing a resume here in order to
3415 give the user a chance to play with the new thread. It might
3416 be good to make that a user-settable option. */
3417
3418 /* At this point, all threads are stopped (happens automatically
3419 in either the OS or the native code). Therefore we need to
3420 continue all threads in order to make progress. */
3421
3422 if (!ptid_equal (ecs->ptid, inferior_ptid))
3423 context_switch (ecs->ptid);
3424 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3425 prepare_to_wait (ecs);
3426 return;
3427 }
3428
3429 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3430 {
3431 /* Do we need to clean up the state of a thread that has
3432 completed a displaced single-step? (Doing so usually affects
3433 the PC, so do it here, before we set stop_pc.) */
3434 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3435
3436 /* If we either finished a single-step or hit a breakpoint, but
3437 the user wanted this thread to be stopped, pretend we got a
3438 SIG0 (generic unsignaled stop). */
3439
3440 if (ecs->event_thread->stop_requested
3441 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3442 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3443 }
3444
3445 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3446
3447 if (debug_infrun)
3448 {
3449 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3450 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3451 struct cleanup *old_chain = save_inferior_ptid ();
3452
3453 inferior_ptid = ecs->ptid;
3454
3455 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3456 paddress (gdbarch, stop_pc));
3457 if (target_stopped_by_watchpoint ())
3458 {
3459 CORE_ADDR addr;
3460
3461 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3462
3463 if (target_stopped_data_address (&current_target, &addr))
3464 fprintf_unfiltered (gdb_stdlog,
3465 "infrun: stopped data address = %s\n",
3466 paddress (gdbarch, addr));
3467 else
3468 fprintf_unfiltered (gdb_stdlog,
3469 "infrun: (no data address available)\n");
3470 }
3471
3472 do_cleanups (old_chain);
3473 }
3474
3475 if (stepping_past_singlestep_breakpoint)
3476 {
3477 gdb_assert (singlestep_breakpoints_inserted_p);
3478 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3479 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3480
3481 stepping_past_singlestep_breakpoint = 0;
3482
3483 /* We've either finished single-stepping past the single-step
3484 breakpoint, or stopped for some other reason. It would be nice if
3485 we could tell, but we can't reliably. */
3486 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3487 {
3488 if (debug_infrun)
3489 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3490 /* Pull the single step breakpoints out of the target. */
3491 remove_single_step_breakpoints ();
3492 singlestep_breakpoints_inserted_p = 0;
3493
3494 ecs->random_signal = 0;
3495 ecs->event_thread->trap_expected = 0;
3496
3497 context_switch (saved_singlestep_ptid);
3498 if (deprecated_context_hook)
3499 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3500
3501 resume (1, TARGET_SIGNAL_0);
3502 prepare_to_wait (ecs);
3503 return;
3504 }
3505 }
3506
3507 if (!ptid_equal (deferred_step_ptid, null_ptid))
3508 {
3509 /* In non-stop mode, there's never a deferred_step_ptid set. */
3510 gdb_assert (!non_stop);
3511
3512 /* If we stopped for some other reason than single-stepping, ignore
3513 the fact that we were supposed to switch back. */
3514 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3515 {
3516 if (debug_infrun)
3517 fprintf_unfiltered (gdb_stdlog,
3518 "infrun: handling deferred step\n");
3519
3520 /* Pull the single step breakpoints out of the target. */
3521 if (singlestep_breakpoints_inserted_p)
3522 {
3523 remove_single_step_breakpoints ();
3524 singlestep_breakpoints_inserted_p = 0;
3525 }
3526
3527 /* Note: We do not call context_switch at this point, as the
3528 context is already set up for stepping the original thread. */
3529 switch_to_thread (deferred_step_ptid);
3530 deferred_step_ptid = null_ptid;
3531 /* Suppress spurious "Switching to ..." message. */
3532 previous_inferior_ptid = inferior_ptid;
3533
3534 resume (1, TARGET_SIGNAL_0);
3535 prepare_to_wait (ecs);
3536 return;
3537 }
3538
3539 deferred_step_ptid = null_ptid;
3540 }
3541
3542 /* See if a thread hit a thread-specific breakpoint that was meant for
3543 another thread. If so, then step that thread past the breakpoint,
3544 and continue it. */
3545
3546 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3547 {
3548 int thread_hop_needed = 0;
3549 struct address_space *aspace =
3550 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3551
3552 /* Check if a regular breakpoint has been hit before checking
3553 for a potential single step breakpoint. Otherwise, GDB will
3554 not see this breakpoint hit when stepping onto breakpoints. */
3555 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3556 {
3557 ecs->random_signal = 0;
3558 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3559 thread_hop_needed = 1;
3560 }
3561 else if (singlestep_breakpoints_inserted_p)
3562 {
3563 /* We have not context switched yet, so this should be true
3564 no matter which thread hit the singlestep breakpoint. */
3565 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3566 if (debug_infrun)
3567 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3568 "trap for %s\n",
3569 target_pid_to_str (ecs->ptid));
3570
3571 ecs->random_signal = 0;
3572 /* The call to in_thread_list is necessary because PTIDs sometimes
3573 change when we go from single-threaded to multi-threaded. If
3574 the singlestep_ptid is still in the list, assume that it is
3575 really different from ecs->ptid. */
3576 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3577 && in_thread_list (singlestep_ptid))
3578 {
3579 /* If the PC of the thread we were trying to single-step
3580 has changed, discard this event (which we were going
3581 to ignore anyway), and pretend we saw that thread
3582 trap. This prevents us continuously moving the
3583 single-step breakpoint forward, one instruction at a
3584 time. If the PC has changed, then the thread we were
3585 trying to single-step has trapped or been signalled,
3586 but the event has not been reported to GDB yet.
3587
3588 There might be some cases where this loses signal
3589 information, if a signal has arrived at exactly the
3590 same time that the PC changed, but this is the best
3591 we can do with the information available. Perhaps we
3592 should arrange to report all events for all threads
3593 when they stop, or to re-poll the remote looking for
3594 this particular thread (i.e. temporarily enable
3595 schedlock). */
3596
3597 CORE_ADDR new_singlestep_pc
3598 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3599
3600 if (new_singlestep_pc != singlestep_pc)
3601 {
3602 enum target_signal stop_signal;
3603
3604 if (debug_infrun)
3605 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3606 " but expected thread advanced also\n");
3607
3608 /* The current context still belongs to
3609 singlestep_ptid. Don't swap here, since that's
3610 the context we want to use. Just fudge our
3611 state and continue. */
3612 stop_signal = ecs->event_thread->stop_signal;
3613 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3614 ecs->ptid = singlestep_ptid;
3615 ecs->event_thread = find_thread_ptid (ecs->ptid);
3616 ecs->event_thread->stop_signal = stop_signal;
3617 stop_pc = new_singlestep_pc;
3618 }
3619 else
3620 {
3621 if (debug_infrun)
3622 fprintf_unfiltered (gdb_stdlog,
3623 "infrun: unexpected thread\n");
3624
3625 thread_hop_needed = 1;
3626 stepping_past_singlestep_breakpoint = 1;
3627 saved_singlestep_ptid = singlestep_ptid;
3628 }
3629 }
3630 }
3631
3632 if (thread_hop_needed)
3633 {
3634 struct regcache *thread_regcache;
3635 int remove_status = 0;
3636
3637 if (debug_infrun)
3638 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3639
3640 /* Switch context before touching inferior memory, the
3641 previous thread may have exited. */
3642 if (!ptid_equal (inferior_ptid, ecs->ptid))
3643 context_switch (ecs->ptid);
3644
3645 /* Saw a breakpoint, but it was hit by the wrong thread.
3646 Just continue. */
3647
3648 if (singlestep_breakpoints_inserted_p)
3649 {
3650 /* Pull the single step breakpoints out of the target. */
3651 remove_single_step_breakpoints ();
3652 singlestep_breakpoints_inserted_p = 0;
3653 }
3654
3655 /* If the arch can displace step, don't remove the
3656 breakpoints. */
3657 thread_regcache = get_thread_regcache (ecs->ptid);
3658 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3659 remove_status = remove_breakpoints ();
3660
3661 		  /* Did we fail to remove breakpoints?  If so, we cannot
3662 		     step over the breakpoint hit in the wrong thread.
3663 		     (There's at least one situation in which we can fail to
3664 		     remove the bp's: on HP-UX systems that use ttrace, we
3665 		     can't change the address space of a vforking child
3666 		     process until the child exits (well, okay, not then
3667 		     either :-) or execs.)  */
3668 if (remove_status != 0)
3669 error (_("Cannot step over breakpoint hit in wrong thread"));
3670 else
3671 { /* Single step */
3672 if (!non_stop)
3673 {
3674 /* Only need to require the next event from this
3675 thread in all-stop mode. */
3676 waiton_ptid = ecs->ptid;
3677 infwait_state = infwait_thread_hop_state;
3678 }
3679
3680 ecs->event_thread->stepping_over_breakpoint = 1;
3681 keep_going (ecs);
3682 return;
3683 }
3684 }
3685 else if (singlestep_breakpoints_inserted_p)
3686 {
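	  /* No regular breakpoint is inserted here, but our software
	     single-step breakpoints are; the trap was presumably caused
	     by one of them, so don't treat it as a random signal.  */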
3687 sw_single_step_trap_p = 1;
3688 ecs->random_signal = 0;
3689 }
3690 }
3691 else
3692 ecs->random_signal = 1;
3693
3694 /* See if something interesting happened to the non-current thread. If
3695 so, then switch to that thread. */
3696 if (!ptid_equal (ecs->ptid, inferior_ptid))
3697 {
3698 if (debug_infrun)
3699 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3700
3701 context_switch (ecs->ptid);
3702
3703 if (deprecated_context_hook)
3704 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3705 }
3706
3707 /* At this point, get hold of the now-current thread's frame. */
3708 frame = get_current_frame ();
3709 gdbarch = get_frame_arch (frame);
3710
3711 if (singlestep_breakpoints_inserted_p)
3712 {
3713 /* Pull the single step breakpoints out of the target. */
3714 remove_single_step_breakpoints ();
3715 singlestep_breakpoints_inserted_p = 0;
3716 }
3717
3718 if (stepped_after_stopped_by_watchpoint)
3719 stopped_by_watchpoint = 0;
3720 else
3721 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3722
3723 /* If necessary, step over this watchpoint. We'll be back to display
3724 it in a moment. */
3725 if (stopped_by_watchpoint
3726 && (target_have_steppable_watchpoint
3727 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3728 {
3729 /* At this point, we are stopped at an instruction which has
3730 attempted to write to a piece of memory under control of
3731 a watchpoint. The instruction hasn't actually executed
3732 yet. If we were to evaluate the watchpoint expression
3733 now, we would get the old value, and therefore no change
3734 would seem to have occurred.
3735
3736 In order to make watchpoints work `right', we really need
3737 to complete the memory write, and then evaluate the
3738 watchpoint expression. We do this by single-stepping the
3739 target.
3740
3741 	 It may not be necessary to disable the watchpoint to step over
3742 it. For example, the PA can (with some kernel cooperation)
3743 single step over a watchpoint without disabling the watchpoint.
3744
3745 It is far more common to need to disable a watchpoint to step
3746 the inferior over it. If we have non-steppable watchpoints,
3747 we must disable the current watchpoint; it's simplest to
3748 disable all watchpoints and breakpoints. */
3749 int hw_step = 1;
3750
3751 if (!target_have_steppable_watchpoint)
3752 remove_breakpoints ();
3753 /* Single step */
3754 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3755 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3756 waiton_ptid = ecs->ptid;
3757 if (target_have_steppable_watchpoint)
3758 infwait_state = infwait_step_watch_state;
3759 else
3760 infwait_state = infwait_nonstep_watch_state;
3761 prepare_to_wait (ecs);
3762 return;
3763 }
3764
3765 ecs->stop_func_start = 0;
3766 ecs->stop_func_end = 0;
3767 ecs->stop_func_name = 0;
3768 /* Don't care about return value; stop_func_start and stop_func_name
3769 will both be 0 if it doesn't work. */
3770 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3771 &ecs->stop_func_start, &ecs->stop_func_end);
3772 ecs->stop_func_start
3773 += gdbarch_deprecated_function_start_offset (gdbarch);
3774 ecs->event_thread->stepping_over_breakpoint = 0;
3775 bpstat_clear (&ecs->event_thread->stop_bpstat);
3776 ecs->event_thread->stop_step = 0;
3777 stop_print_frame = 1;
3778 ecs->random_signal = 0;
3779 stopped_by_random_signal = 0;
3780
3781 /* Hide inlined functions starting here, unless we just performed stepi or
3782 nexti. After stepi and nexti, always show the innermost frame (not any
3783 inline function call sites). */
3784 if (ecs->event_thread->step_range_end != 1)
3785 skip_inline_frames (ecs->ptid);
3786
3787 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3788 && ecs->event_thread->trap_expected
3789 && gdbarch_single_step_through_delay_p (gdbarch)
3790 && currently_stepping (ecs->event_thread))
3791 {
3792 /* We're trying to step off a breakpoint. Turns out that we're
3793 also on an instruction that needs to be stepped multiple
3794 	 times before it's been fully executed.  E.g., architectures
3795 with a delay slot. It needs to be stepped twice, once for
3796 the instruction and once for the delay slot. */
3797 int step_through_delay
3798 = gdbarch_single_step_through_delay (gdbarch, frame);
3799
3800 if (debug_infrun && step_through_delay)
3801 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3802 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3803 {
3804 /* The user issued a continue when stopped at a breakpoint.
3805 Set up for another trap and get out of here. */
3806 ecs->event_thread->stepping_over_breakpoint = 1;
3807 keep_going (ecs);
3808 return;
3809 }
3810 else if (step_through_delay)
3811 {
3812 /* The user issued a step when stopped at a breakpoint.
3813 Maybe we should stop, maybe we should not - the delay
3814 slot *might* correspond to a line of source. In any
3815 case, don't decide that here, just set
3816 ecs->stepping_over_breakpoint, making sure we
3817 single-step again before breakpoints are re-inserted. */
3818 ecs->event_thread->stepping_over_breakpoint = 1;
3819 }
3820 }
3821
3822 /* Look at the cause of the stop, and decide what to do.
3823 The alternatives are:
3824 1) stop_stepping and return; to really stop and return to the debugger,
3825 2) keep_going and return to start up again
3826 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3827 3) set ecs->random_signal to 1, and the decision between 1 and 2
3828 will be made according to the signal handling tables. */
3829
3830 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3831 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3832 || stop_soon == STOP_QUIETLY_REMOTE)
3833 {
3834 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3835 {
3836 if (debug_infrun)
3837 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3838 stop_print_frame = 0;
3839 stop_stepping (ecs);
3840 return;
3841 }
3842
3843       /* This originates from start_remote(), start_inferior() and
3844 	 shared library hook functions.  */
3845 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3846 {
3847 if (debug_infrun)
3848 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3849 stop_stepping (ecs);
3850 return;
3851 }
3852
3853 /* This originates from attach_command(). We need to overwrite
3854 the stop_signal here, because some kernels don't ignore a
3855 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3856 See more comments in inferior.h. On the other hand, if we
3857 get a non-SIGSTOP, report it to the user - assume the backend
3858 will handle the SIGSTOP if it should show up later.
3859
3860 Also consider that the attach is complete when we see a
3861 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3862 target extended-remote report it instead of a SIGSTOP
3863 (e.g. gdbserver). We already rely on SIGTRAP being our
3864 signal, so this is no exception.
3865
3866 Also consider that the attach is complete when we see a
3867 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3868 the target to stop all threads of the inferior, in case the
3869 low level attach operation doesn't stop them implicitly. If
3870 they weren't stopped implicitly, then the stub will report a
3871 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3872 other than GDB's request. */
3873 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3874 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3875 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3876 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3877 {
3878 stop_stepping (ecs);
3879 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3880 return;
3881 }
3882
3883 /* See if there is a breakpoint at the current PC. */
3884 ecs->event_thread->stop_bpstat
3885 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3886 stop_pc, ecs->ptid);
3887
3888       /* The following is in case the break condition called a
3889 	 function.  */
3890 stop_print_frame = 1;
3891
3892 /* This is where we handle "moribund" watchpoints. Unlike
3893 software breakpoints traps, hardware watchpoint traps are
3894 always distinguishable from random traps. If no high-level
3895 watchpoint is associated with the reported stop data address
3896 anymore, then the bpstat does not explain the signal ---
3897 simply make sure to ignore it if `stopped_by_watchpoint' is
3898 set. */
3899
3900 if (debug_infrun
3901 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3902 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3903 && stopped_by_watchpoint)
3904 fprintf_unfiltered (gdb_stdlog, "\
3905 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3906
3907 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3908 at one stage in the past included checks for an inferior
3909 function call's call dummy's return breakpoint. The original
3910 comment, that went with the test, read:
3911
3912 ``End of a stack dummy. Some systems (e.g. Sony news) give
3913 another signal besides SIGTRAP, so check here as well as
3914 above.''
3915
3916 	 If someone ever tries to get call dummies on a
3917 non-executable stack to work (where the target would stop
3918 with something like a SIGSEGV), then those tests might need
3919 to be re-instated. Given, however, that the tests were only
3920 enabled when momentary breakpoints were not being used, I
3921 suspect that it won't be the case.
3922
3923 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3924 be necessary for call dummies on a non-executable stack on
3925 SPARC. */
3926
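      /* A SIGTRAP is "expected" if a breakpoint in the bpstat explains
	 it, if a watchpoint triggered, if we were stepping over a
	 breakpoint (trap_expected), or if we are in the middle of a
	 step with no step-resume breakpoint standing in for it;
	 anything else is treated as a random signal.  */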
3927 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3928 ecs->random_signal
3929 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3930 || stopped_by_watchpoint
3931 || ecs->event_thread->trap_expected
3932 || (ecs->event_thread->step_range_end
3933 && ecs->event_thread->step_resume_breakpoint == NULL));
3934 else
3935 {
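	  /* We stopped with a signal other than SIGTRAP.  It is only
	     "expected" if something in the bpstat (e.g. a catchpoint)
	     accounts for it; in that case pretend it was a SIGTRAP so
	     the breakpoint machinery handles the stop.  */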
3936 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3937 if (!ecs->random_signal)
3938 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3939 }
3940 }
3941
3942 /* When we reach this point, we've pretty much decided
3943 that the reason for stopping must've been a random
3944 (unexpected) signal. */
3945
3946 else
3947 ecs->random_signal = 1;
3948
3949 process_event_stop_test:
3950
3951 /* Re-fetch current thread's frame in case we did a
3952 "goto process_event_stop_test" above. */
3953 frame = get_current_frame ();
3954 gdbarch = get_frame_arch (frame);
3955
3956 /* For the program's own signals, act according to
3957 the signal handling tables. */
3958
3959 if (ecs->random_signal)
3960 {
3961 /* Signal not for debugging purposes. */
3962 int printed = 0;
3963 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3964
3965 if (debug_infrun)
3966 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3967 ecs->event_thread->stop_signal);
3968
3969 stopped_by_random_signal = 1;
3970
3971 if (signal_print[ecs->event_thread->stop_signal])
3972 {
3973 printed = 1;
3974 target_terminal_ours_for_output ();
3975 print_signal_received_reason (ecs->event_thread->stop_signal);
3976 }
3977 /* Always stop on signals if we're either just gaining control
3978 of the program, or the user explicitly requested this thread
3979 to remain stopped. */
3980 if (stop_soon != NO_STOP_QUIETLY
3981 || ecs->event_thread->stop_requested
3982 || (!inf->detaching
3983 && signal_stop_state (ecs->event_thread->stop_signal)))
3984 {
3985 stop_stepping (ecs);
3986 return;
3987 }
3988 /* If not going to stop, give terminal back
3989 if we took it away. */
3990 else if (printed)
3991 target_terminal_inferior ();
3992
3993 /* Clear the signal if it should not be passed. */
3994 if (signal_program[ecs->event_thread->stop_signal] == 0)
3995 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3996
3997 if (ecs->event_thread->prev_pc == stop_pc
3998 && ecs->event_thread->trap_expected
3999 && ecs->event_thread->step_resume_breakpoint == NULL)
4000 {
4001 /* We were just starting a new sequence, attempting to
4002 single-step off of a breakpoint and expecting a SIGTRAP.
4003 Instead this signal arrives. This signal will take us out
4004 of the stepping range so GDB needs to remember to, when
4005 the signal handler returns, resume stepping off that
4006 breakpoint. */
4007 /* To simplify things, "continue" is forced to use the same
4008 code paths as single-step - set a breakpoint at the
4009 signal return address and then, once hit, step off that
4010 breakpoint. */
4011 if (debug_infrun)
4012 fprintf_unfiltered (gdb_stdlog,
4013 "infrun: signal arrived while stepping over "
4014 "breakpoint\n");
4015
4016 insert_step_resume_breakpoint_at_frame (frame);
4017 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4018 keep_going (ecs);
4019 return;
4020 }
4021
4022 if (ecs->event_thread->step_range_end != 0
4023 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
4024 && (ecs->event_thread->step_range_start <= stop_pc
4025 && stop_pc < ecs->event_thread->step_range_end)
4026 && frame_id_eq (get_stack_frame_id (frame),
4027 ecs->event_thread->step_stack_frame_id)
4028 && ecs->event_thread->step_resume_breakpoint == NULL)
4029 {
4030 /* The inferior is about to take a signal that will take it
4031 out of the single step range. Set a breakpoint at the
4032 current PC (which is presumably where the signal handler
4033 will eventually return) and then allow the inferior to
4034 run free.
4035
4036 Note that this is only needed for a signal delivered
4037 while in the single-step range. Nested signals aren't a
4038 problem as they eventually all return. */
4039 if (debug_infrun)
4040 fprintf_unfiltered (gdb_stdlog,
4041 "infrun: signal may take us out of "
4042 "single-step range\n");
4043
4044 insert_step_resume_breakpoint_at_frame (frame);
4045 keep_going (ecs);
4046 return;
4047 }
4048
4049       /* Note: step_resume_breakpoint may be non-NULL.  This occurs
4050 when either there's a nested signal, or when there's a
4051 pending signal enabled just as the signal handler returns
4052 (leaving the inferior at the step-resume-breakpoint without
4053 actually executing it). Either way continue until the
4054 breakpoint is really hit. */
4055 keep_going (ecs);
4056 return;
4057 }
4058
4059 /* Handle cases caused by hitting a breakpoint. */
4060 {
4061 CORE_ADDR jmp_buf_pc;
4062 struct bpstat_what what;
4063
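    /* Ask the breakpoint machinery what overall action the breakpoints
       and catchpoints we stopped at call for.  */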
4064 what = bpstat_what (ecs->event_thread->stop_bpstat);
4065
4066 if (what.call_dummy)
4067 {
4068 stop_stack_dummy = what.call_dummy;
4069 }
4070
4071 /* If we hit an internal event that triggers symbol changes, the
4072 current frame will be invalidated within bpstat_what (e.g., if
4073 we hit an internal solib event). Re-fetch it. */
4074 frame = get_current_frame ();
4075 gdbarch = get_frame_arch (frame);
4076
4077 switch (what.main_action)
4078 {
4079 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4080 /* If we hit the breakpoint at longjmp while stepping, we
4081 install a momentary breakpoint at the target of the
4082 jmp_buf. */
4083
4084 if (debug_infrun)
4085 fprintf_unfiltered (gdb_stdlog,
4086 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4087
4088 ecs->event_thread->stepping_over_breakpoint = 1;
4089
4090 if (!gdbarch_get_longjmp_target_p (gdbarch)
4091 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
4092 {
4093 if (debug_infrun)
4094 fprintf_unfiltered (gdb_stdlog, "\
4095 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
4096 keep_going (ecs);
4097 return;
4098 }
4099
4100 /* We're going to replace the current step-resume breakpoint
4101 with a longjmp-resume breakpoint. */
4102 delete_step_resume_breakpoint (ecs->event_thread);
4103
4104 /* Insert a breakpoint at resume address. */
4105 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4106
4107 keep_going (ecs);
4108 return;
4109
4110 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4111 if (debug_infrun)
4112 fprintf_unfiltered (gdb_stdlog,
4113 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4114
4115 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
4116 delete_step_resume_breakpoint (ecs->event_thread);
4117
4118 ecs->event_thread->stop_step = 1;
4119 print_end_stepping_range_reason ();
4120 stop_stepping (ecs);
4121 return;
4122
4123 case BPSTAT_WHAT_SINGLE:
4124 if (debug_infrun)
4125 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4126 ecs->event_thread->stepping_over_breakpoint = 1;
4127 /* Still need to check other stuff, at least the case
4128 where we are stepping and step out of the right range. */
4129 break;
4130
4131 case BPSTAT_WHAT_STOP_NOISY:
4132 if (debug_infrun)
4133 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4134 stop_print_frame = 1;
4135
4136 	  /* We are about to nuke the step_resume_breakpoint via the
4137 cleanup chain, so no need to worry about it here. */
4138
4139 stop_stepping (ecs);
4140 return;
4141
4142 case BPSTAT_WHAT_STOP_SILENT:
4143 if (debug_infrun)
4144 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4145 stop_print_frame = 0;
4146
4147 	  /* We are about to nuke the step_resume_breakpoint via the
4148 cleanup chain, so no need to worry about it here. */
4149
4150 stop_stepping (ecs);
4151 return;
4152
4153 case BPSTAT_WHAT_STEP_RESUME:
4154 if (debug_infrun)
4155 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4156
4157 delete_step_resume_breakpoint (ecs->event_thread);
4158 if (ecs->event_thread->step_after_step_resume_breakpoint)
4159 {
4160 /* Back when the step-resume breakpoint was inserted, we
4161 were trying to single-step off a breakpoint. Go back
4162 to doing that. */
4163 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4164 ecs->event_thread->stepping_over_breakpoint = 1;
4165 keep_going (ecs);
4166 return;
4167 }
4168 if (stop_pc == ecs->stop_func_start
4169 && execution_direction == EXEC_REVERSE)
4170 {
4171 /* We are stepping over a function call in reverse, and
4172 just hit the step-resume breakpoint at the start
4173 address of the function. Go back to single-stepping,
4174 which should take us back to the function call. */
4175 ecs->event_thread->stepping_over_breakpoint = 1;
4176 keep_going (ecs);
4177 return;
4178 }
4179 break;
4180
4181 case BPSTAT_WHAT_KEEP_CHECKING:
4182 break;
4183 }
4184 }
4185
4186 /* We come here if we hit a breakpoint but should not
4187 stop for it. Possibly we also were stepping
4188 and should stop for that. So fall through and
4189 test for stepping. But, if not stepping,
4190 do not stop. */
4191
4192 /* In all-stop mode, if we're currently stepping but have stopped in
4193 some other thread, we need to switch back to the stepped thread. */
4194 if (!non_stop)
4195 {
4196 struct thread_info *tp;
4197
4198 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4199 ecs->event_thread);
4200 if (tp)
4201 {
4202 /* However, if the current thread is blocked on some internal
4203 breakpoint, and we simply need to step over that breakpoint
4204 to get it going again, do that first. */
4205 if ((ecs->event_thread->trap_expected
4206 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4207 || ecs->event_thread->stepping_over_breakpoint)
4208 {
4209 keep_going (ecs);
4210 return;
4211 }
4212
4213 /* If the stepping thread exited, then don't try to switch
4214 back and resume it, which could fail in several different
4215 ways depending on the target. Instead, just keep going.
4216
4217 We can find a stepping dead thread in the thread list in
4218 two cases:
4219
4220 - The target supports thread exit events, and when the
4221 target tries to delete the thread from the thread list,
4222 inferior_ptid pointed at the exiting thread. In such
4223 case, calling delete_thread does not really remove the
4224 thread from the list; instead, the thread is left listed,
4225 with 'exited' state.
4226
4227 - The target's debug interface does not support thread
4228 exit events, and so we have no idea whatsoever if the
4229 previously stepping thread is still alive. For that
4230 reason, we need to synchronously query the target
4231 now. */
4232 if (is_exited (tp->ptid)
4233 || !target_thread_alive (tp->ptid))
4234 {
4235 if (debug_infrun)
4236 fprintf_unfiltered (gdb_stdlog, "\
4237 infrun: not switching back to stepped thread, it has vanished\n");
4238
4239 delete_thread (tp->ptid);
4240 keep_going (ecs);
4241 return;
4242 }
4243
4244 /* Otherwise, we no longer expect a trap in the current thread.
4245 Clear the trap_expected flag before switching back -- this is
4246 what keep_going would do as well, if we called it. */
4247 ecs->event_thread->trap_expected = 0;
4248
4249 if (debug_infrun)
4250 fprintf_unfiltered (gdb_stdlog,
4251 "infrun: switching back to stepped thread\n");
4252
4253 ecs->event_thread = tp;
4254 ecs->ptid = tp->ptid;
4255 context_switch (ecs->ptid);
4256 keep_going (ecs);
4257 return;
4258 }
4259 }
4260
4261 /* Are we stepping to get the inferior out of the dynamic linker's
4262 hook (and possibly the dld itself) after catching a shlib
4263 event? */
4264 if (ecs->event_thread->stepping_through_solib_after_catch)
4265 {
4266 #if defined(SOLIB_ADD)
4267 /* Have we reached our destination? If not, keep going. */
4268 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4269 {
4270 if (debug_infrun)
4271 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4272 ecs->event_thread->stepping_over_breakpoint = 1;
4273 keep_going (ecs);
4274 return;
4275 }
4276 #endif
4277 if (debug_infrun)
4278 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4279 /* Else, stop and report the catchpoint(s) whose triggering
4280 caused us to begin stepping. */
4281 ecs->event_thread->stepping_through_solib_after_catch = 0;
4282 bpstat_clear (&ecs->event_thread->stop_bpstat);
4283 ecs->event_thread->stop_bpstat
4284 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4285 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4286 stop_print_frame = 1;
4287 stop_stepping (ecs);
4288 return;
4289 }
4290
4291 if (ecs->event_thread->step_resume_breakpoint)
4292 {
4293 if (debug_infrun)
4294 fprintf_unfiltered (gdb_stdlog,
4295 "infrun: step-resume breakpoint is inserted\n");
4296
4297 /* Having a step-resume breakpoint overrides anything
4298 else having to do with stepping commands until
4299 that breakpoint is reached. */
4300 keep_going (ecs);
4301 return;
4302 }
4303
4304 if (ecs->event_thread->step_range_end == 0)
4305 {
4306 if (debug_infrun)
4307 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4308 /* Likewise if we aren't even stepping. */
4309 keep_going (ecs);
4310 return;
4311 }
4312
4313 /* Re-fetch current thread's frame in case the code above caused
4314 the frame cache to be re-initialized, making our FRAME variable
4315 a dangling pointer. */
4316 frame = get_current_frame ();
4317 gdbarch = get_frame_arch (frame);
4318
4319 /* If stepping through a line, keep going if still within it.
4320
4321 Note that step_range_end is the address of the first instruction
4322 beyond the step range, and NOT the address of the last instruction
4323 within it!
4324
4325 Note also that during reverse execution, we may be stepping
4326 through a function epilogue and therefore must detect when
4327 the current-frame changes in the middle of a line. */
4328
4329 if (stop_pc >= ecs->event_thread->step_range_start
4330 && stop_pc < ecs->event_thread->step_range_end
4331 && (execution_direction != EXEC_REVERSE
4332 || frame_id_eq (get_frame_id (frame),
4333 ecs->event_thread->step_frame_id)))
4334 {
4335 if (debug_infrun)
4336 fprintf_unfiltered
4337 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4338 paddress (gdbarch, ecs->event_thread->step_range_start),
4339 paddress (gdbarch, ecs->event_thread->step_range_end));
4340
4341 /* When stepping backward, stop at beginning of line range
4342 (unless it's the function entry point, in which case
4343 keep going back to the call point). */
4344 if (stop_pc == ecs->event_thread->step_range_start
4345 && stop_pc != ecs->stop_func_start
4346 && execution_direction == EXEC_REVERSE)
4347 {
4348 ecs->event_thread->stop_step = 1;
4349 print_end_stepping_range_reason ();
4350 stop_stepping (ecs);
4351 }
4352 else
4353 keep_going (ecs);
4354
4355 return;
4356 }
4357
4358 /* We stepped out of the stepping range. */
4359
4360 /* If we are stepping at the source level and entered the runtime
4361 loader dynamic symbol resolution code...
4362
4363 EXEC_FORWARD: we keep on single stepping until we exit the run
4364 time loader code and reach the callee's address.
4365
4366 EXEC_REVERSE: we've already executed the callee (backward), and
4367 the runtime loader code is handled just like any other
4368 undebuggable function call. Now we need only keep stepping
4369 backward through the trampoline code, and that's handled further
4370 down, so there is nothing for us to do here. */
4371
4372 if (execution_direction != EXEC_REVERSE
4373 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4374 && in_solib_dynsym_resolve_code (stop_pc))
4375 {
4376 CORE_ADDR pc_after_resolver =
4377 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4378
4379 if (debug_infrun)
4380 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4381
4382 if (pc_after_resolver)
4383 {
4384 /* Set up a step-resume breakpoint at the address
4385 indicated by SKIP_SOLIB_RESOLVER. */
4386 struct symtab_and_line sr_sal;
4387
4388 init_sal (&sr_sal);
4389 sr_sal.pc = pc_after_resolver;
4390 sr_sal.pspace = get_frame_program_space (frame);
4391
4392 insert_step_resume_breakpoint_at_sal (gdbarch,
4393 sr_sal, null_frame_id);
4394 }
4395
4396 keep_going (ecs);
4397 return;
4398 }
4399
4400 if (ecs->event_thread->step_range_end != 1
4401 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4402 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4403 && get_frame_type (frame) == SIGTRAMP_FRAME)
4404 {
4405 if (debug_infrun)
4406 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4407 /* The inferior, while doing a "step" or "next", has ended up in
4408 a signal trampoline (either by a signal being delivered or by
4409 the signal handler returning). Just single-step until the
4410 inferior leaves the trampoline (either by calling the handler
4411 or returning). */
4412 keep_going (ecs);
4413 return;
4414 }
4415
4416 /* Check for subroutine calls. The check for the current frame
4417 equalling the step ID is not necessary - the check of the
4418 previous frame's ID is sufficient - but it is a common case and
4419 cheaper than checking the previous frame's ID.
4420
4421 NOTE: frame_id_eq will never report two invalid frame IDs as
4422 being equal, so to get into this block, both the current and
4423 previous frame must have valid frame IDs. */
4424 /* The outer_frame_id check is a heuristic to detect stepping
4425 through startup code. If we step over an instruction which
4426 sets the stack pointer from an invalid value to a valid value,
4427 we may detect that as a subroutine call from the mythical
4428 "outermost" function. This could be fixed by marking
4429 outermost frames as !stack_p,code_p,special_p. Then the
4430 initial outermost frame, before sp was valid, would
4431 have code_addr == &_start. See the comment in frame_id_eq
4432 for more. */
4433 if (!frame_id_eq (get_stack_frame_id (frame),
4434 ecs->event_thread->step_stack_frame_id)
4435 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4436 ecs->event_thread->step_stack_frame_id)
4437 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4438 outer_frame_id)
4439 || step_start_function != find_pc_function (stop_pc))))
4440 {
4441 CORE_ADDR real_stop_pc;
4442
4443 if (debug_infrun)
4444 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4445
4446 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4447 || ((ecs->event_thread->step_range_end == 1)
4448 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4449 ecs->stop_func_start)))
4450 {
4451 /* I presume that step_over_calls is only 0 when we're
4452 supposed to be stepping at the assembly language level
4453 ("stepi"). Just stop. */
4454 	  /* Also, maybe we just did a "nexti" inside a prologue, so we
4455 thought it was a subroutine call but it was not. Stop as
4456 well. FENN */
4457 /* And this works the same backward as frontward. MVS */
4458 ecs->event_thread->stop_step = 1;
4459 print_end_stepping_range_reason ();
4460 stop_stepping (ecs);
4461 return;
4462 }
4463
4464 /* Reverse stepping through solib trampolines. */
4465
4466 if (execution_direction == EXEC_REVERSE
4467 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4468 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4469 || (ecs->stop_func_start == 0
4470 && in_solib_dynsym_resolve_code (stop_pc))))
4471 {
4472 /* Any solib trampoline code can be handled in reverse
4473 by simply continuing to single-step. We have already
4474 executed the solib function (backwards), and a few
4475 steps will take us back through the trampoline to the
4476 caller. */
4477 keep_going (ecs);
4478 return;
4479 }
4480
4481 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4482 {
4483 /* We're doing a "next".
4484
4485 Normal (forward) execution: set a breakpoint at the
4486 callee's return address (the address at which the caller
4487 will resume).
4488
4489 Reverse (backward) execution. set the step-resume
4490 breakpoint at the start of the function that we just
4491 stepped into (backwards), and continue to there. When we
4492 get there, we'll need to single-step back to the caller. */
4493
4494 if (execution_direction == EXEC_REVERSE)
4495 {
4496 struct symtab_and_line sr_sal;
4497
4498 /* Normal function call return (static or dynamic). */
4499 init_sal (&sr_sal);
4500 sr_sal.pc = ecs->stop_func_start;
4501 sr_sal.pspace = get_frame_program_space (frame);
4502 insert_step_resume_breakpoint_at_sal (gdbarch,
4503 sr_sal, null_frame_id);
4504 }
4505 else
4506 insert_step_resume_breakpoint_at_caller (frame);
4507
4508 keep_going (ecs);
4509 return;
4510 }
4511
4512 /* If we are in a function call trampoline (a stub between the
4513 calling routine and the real function), locate the real
4514 function. That's what tells us (a) whether we want to step
4515 into it at all, and (b) what prologue we want to run to the
4516 end of, if we do step into it. */
4517 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4518 if (real_stop_pc == 0)
4519 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4520 if (real_stop_pc != 0)
4521 ecs->stop_func_start = real_stop_pc;
4522
4523 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4524 {
4525 struct symtab_and_line sr_sal;
4526
4527 init_sal (&sr_sal);
4528 sr_sal.pc = ecs->stop_func_start;
4529 sr_sal.pspace = get_frame_program_space (frame);
4530
4531 insert_step_resume_breakpoint_at_sal (gdbarch,
4532 sr_sal, null_frame_id);
4533 keep_going (ecs);
4534 return;
4535 }
4536
4537 /* If we have line number information for the function we are
4538 thinking of stepping into, step into it.
4539
4540 If there are several symtabs at that PC (e.g. with include
4541 files), just want to know whether *any* of them have line
4542 numbers. find_pc_line handles this. */
4543 {
4544 struct symtab_and_line tmp_sal;
4545
4546 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4547 tmp_sal.pspace = get_frame_program_space (frame);
4548 if (tmp_sal.line != 0)
4549 {
4550 if (execution_direction == EXEC_REVERSE)
4551 handle_step_into_function_backward (gdbarch, ecs);
4552 else
4553 handle_step_into_function (gdbarch, ecs);
4554 return;
4555 }
4556 }
4557
4558 /* If we have no line number and the step-stop-if-no-debug is
4559 set, we stop the step so that the user has a chance to switch
4560 in assembly mode. */
4561 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4562 && step_stop_if_no_debug)
4563 {
4564 ecs->event_thread->stop_step = 1;
4565 print_end_stepping_range_reason ();
4566 stop_stepping (ecs);
4567 return;
4568 }
4569
4570 if (execution_direction == EXEC_REVERSE)
4571 {
4572 /* Set a breakpoint at callee's start address.
4573 From there we can step once and be back in the caller. */
4574 struct symtab_and_line sr_sal;
4575
4576 init_sal (&sr_sal);
4577 sr_sal.pc = ecs->stop_func_start;
4578 sr_sal.pspace = get_frame_program_space (frame);
4579 insert_step_resume_breakpoint_at_sal (gdbarch,
4580 sr_sal, null_frame_id);
4581 }
4582 else
4583 /* Set a breakpoint at callee's return address (the address
4584 at which the caller will resume). */
4585 insert_step_resume_breakpoint_at_caller (frame);
4586
4587 keep_going (ecs);
4588 return;
4589 }
4590
4591 /* Reverse stepping through solib trampolines. */
4592
4593 if (execution_direction == EXEC_REVERSE
4594 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4595 {
4596 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4597 || (ecs->stop_func_start == 0
4598 && in_solib_dynsym_resolve_code (stop_pc)))
4599 {
4600 /* Any solib trampoline code can be handled in reverse
4601 by simply continuing to single-step. We have already
4602 executed the solib function (backwards), and a few
4603 steps will take us back through the trampoline to the
4604 caller. */
4605 keep_going (ecs);
4606 return;
4607 }
4608 else if (in_solib_dynsym_resolve_code (stop_pc))
4609 {
4610 /* Stepped backward into the solib dynsym resolver.
4611 Set a breakpoint at its start and continue, then
4612 one more step will take us out. */
4613 struct symtab_and_line sr_sal;
4614
4615 init_sal (&sr_sal);
4616 sr_sal.pc = ecs->stop_func_start;
4617 sr_sal.pspace = get_frame_program_space (frame);
4618 insert_step_resume_breakpoint_at_sal (gdbarch,
4619 sr_sal, null_frame_id);
4620 keep_going (ecs);
4621 return;
4622 }
4623 }
4624
4625 /* If we're in the return path from a shared library trampoline,
4626 we want to proceed through the trampoline when stepping. */
4627 if (gdbarch_in_solib_return_trampoline (gdbarch,
4628 stop_pc, ecs->stop_func_name))
4629 {
4630 /* Determine where this trampoline returns. */
4631 CORE_ADDR real_stop_pc;
4632
4633 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4634
4635 if (debug_infrun)
4636 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4637
4638 /* Only proceed through if we know where it's going. */
4639 if (real_stop_pc)
4640 {
4641 /* And put the step-breakpoint there and go until there. */
4642 struct symtab_and_line sr_sal;
4643
4644 init_sal (&sr_sal); /* initialize to zeroes */
4645 sr_sal.pc = real_stop_pc;
4646 sr_sal.section = find_pc_overlay (sr_sal.pc);
4647 sr_sal.pspace = get_frame_program_space (frame);
4648
4649 /* Do not specify what the fp should be when we stop since
4650 on some machines the prologue is where the new fp value
4651 is established. */
4652 insert_step_resume_breakpoint_at_sal (gdbarch,
4653 sr_sal, null_frame_id);
4654
4655 /* Restart without fiddling with the step ranges or
4656 other state. */
4657 keep_going (ecs);
4658 return;
4659 }
4660 }
4661
4662 stop_pc_sal = find_pc_line (stop_pc, 0);
4663
4664 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4665      the trampoline processing logic; however, there are some trampolines
4666 that have no names, so we should do trampoline handling first. */
4667 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4668 && ecs->stop_func_name == NULL
4669 && stop_pc_sal.line == 0)
4670 {
4671 if (debug_infrun)
4672 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4673
4674 /* The inferior just stepped into, or returned to, an
4675 undebuggable function (where there is no debugging information
4676 and no line number corresponding to the address where the
4677 inferior stopped). Since we want to skip this kind of code,
4678 we keep going until the inferior returns from this
4679 function - unless the user has asked us not to (via
4680 set step-mode) or we no longer know how to get back
4681 to the call site. */
4682 if (step_stop_if_no_debug
4683 || !frame_id_p (frame_unwind_caller_id (frame)))
4684 {
4685 /* If we have no line number and the step-stop-if-no-debug
4686 is set, we stop the step so that the user has a chance to
4687 switch in assembly mode. */
4688 ecs->event_thread->stop_step = 1;
4689 print_end_stepping_range_reason ();
4690 stop_stepping (ecs);
4691 return;
4692 }
4693 else
4694 {
4695 /* Set a breakpoint at callee's return address (the address
4696 at which the caller will resume). */
4697 insert_step_resume_breakpoint_at_caller (frame);
4698 keep_going (ecs);
4699 return;
4700 }
4701 }
4702
4703 if (ecs->event_thread->step_range_end == 1)
4704 {
4705 /* It is stepi or nexti. We always want to stop stepping after
4706 one instruction. */
4707 if (debug_infrun)
4708 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4709 ecs->event_thread->stop_step = 1;
4710 print_end_stepping_range_reason ();
4711 stop_stepping (ecs);
4712 return;
4713 }
4714
4715 if (stop_pc_sal.line == 0)
4716 {
4717 /* We have no line number information. That means to stop
4718 stepping (does this always happen right after one instruction,
4719 when we do "s" in a function with no line numbers,
4720 or can this happen as a result of a return or longjmp?). */
4721 if (debug_infrun)
4722 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4723 ecs->event_thread->stop_step = 1;
4724 print_end_stepping_range_reason ();
4725 stop_stepping (ecs);
4726 return;
4727 }
4728
4729 /* Look for "calls" to inlined functions, part one. If the inline
4730 frame machinery detected some skipped call sites, we have entered
4731 a new inline function. */
4732
4733 if (frame_id_eq (get_frame_id (get_current_frame ()),
4734 ecs->event_thread->step_frame_id)
4735 && inline_skipped_frames (ecs->ptid))
4736 {
4737 struct symtab_and_line call_sal;
4738
4739 if (debug_infrun)
4740 fprintf_unfiltered (gdb_stdlog,
4741 "infrun: stepped into inlined function\n");
4742
4743 find_frame_sal (get_current_frame (), &call_sal);
4744
4745 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4746 {
4747 /* For "step", we're going to stop. But if the call site
4748 for this inlined function is on the same source line as
4749 we were previously stepping, go down into the function
4750 first. Otherwise stop at the call site. */
4751
4752 if (call_sal.line == ecs->event_thread->current_line
4753 && call_sal.symtab == ecs->event_thread->current_symtab)
4754 step_into_inline_frame (ecs->ptid);
4755
4756 ecs->event_thread->stop_step = 1;
4757 print_end_stepping_range_reason ();
4758 stop_stepping (ecs);
4759 return;
4760 }
4761 else
4762 {
4763 /* For "next", we should stop at the call site if it is on a
4764 different source line. Otherwise continue through the
4765 inlined function. */
4766 if (call_sal.line == ecs->event_thread->current_line
4767 && call_sal.symtab == ecs->event_thread->current_symtab)
4768 keep_going (ecs);
4769 else
4770 {
4771 ecs->event_thread->stop_step = 1;
4772 print_end_stepping_range_reason ();
4773 stop_stepping (ecs);
4774 }
4775 return;
4776 }
4777 }
4778
4779 /* Look for "calls" to inlined functions, part two. If we are still
4780 in the same real function we were stepping through, but we have
4781 to go further up to find the exact frame ID, we are stepping
4782 through a more inlined call beyond its call site. */
4783
4784 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4785 && !frame_id_eq (get_frame_id (get_current_frame ()),
4786 ecs->event_thread->step_frame_id)
4787 && stepped_in_from (get_current_frame (),
4788 ecs->event_thread->step_frame_id))
4789 {
4790 if (debug_infrun)
4791 fprintf_unfiltered (gdb_stdlog,
4792 "infrun: stepping through inlined function\n");
4793
4794 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4795 keep_going (ecs);
4796 else
4797 {
4798 ecs->event_thread->stop_step = 1;
4799 print_end_stepping_range_reason ();
4800 stop_stepping (ecs);
4801 }
4802 return;
4803 }
4804
4805 if ((stop_pc == stop_pc_sal.pc)
4806 && (ecs->event_thread->current_line != stop_pc_sal.line
4807 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4808 {
4809 /* We are at the start of a different line. So stop. Note that
4810 we don't stop if we step into the middle of a different line.
4811 That is said to make things like for (;;) statements work
4812 better. */
4813 if (debug_infrun)
4814 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4815 ecs->event_thread->stop_step = 1;
4816 print_end_stepping_range_reason ();
4817 stop_stepping (ecs);
4818 return;
4819 }
4820
4821 /* We aren't done stepping.
4822
4823 Optimize by setting the stepping range to the line.
4824 (We might not be in the original line, but if we entered a
4825 new line in mid-statement, we continue stepping. This makes
4826 things like for(;;) statements work better.) */
4827
4828 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4829 ecs->event_thread->step_range_end = stop_pc_sal.end;
4830 set_step_info (frame, stop_pc_sal);
4831
4832 if (debug_infrun)
4833 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4834 keep_going (ecs);
4835 }
4836
4837 /* Is thread TP in the middle of single-stepping? */
4838
4839 static int
4840 currently_stepping (struct thread_info *tp)
4841 {
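  /* A thread is stepping if a step/next range is in effect with no
     step-resume breakpoint standing in for it, if it is stepping over
     a breakpoint (trap_expected), if it is stepping out of the dynamic
     linker after a shared library catchpoint, or if the breakpoint
     machinery (e.g. a software watchpoint) requires single-stepping.  */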
4842 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4843 || tp->trap_expected
4844 || tp->stepping_through_solib_after_catch
4845 || bpstat_should_step ());
4846 }
4847
4848 /* Returns true if any thread *but* the one passed in "data" is in the
4849 middle of stepping or of handling a "next". */
4850
4851 static int
4852 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4853 {
4854 if (tp == data)
4855 return 0;
4856
4857 return (tp->step_range_end
4858 || tp->trap_expected
4859 || tp->stepping_through_solib_after_catch);
4860 }
4861
4862 /* Inferior has stepped into a subroutine call with source code that
4863 we should not step over. Do step to the first line of code in
4864 it. */
4865
4866 static void
4867 handle_step_into_function (struct gdbarch *gdbarch,
4868 struct execution_control_state *ecs)
4869 {
4870 struct symtab *s;
4871 struct symtab_and_line stop_func_sal, sr_sal;
4872
4873 s = find_pc_symtab (stop_pc);
4874 if (s && s->language != language_asm)
4875 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4876 ecs->stop_func_start);
4877
4878 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4879 /* Use the step_resume_break to step until the end of the prologue,
4880 even if that involves jumps (as it seems to on the vax under
4881 4.2). */
4882 /* If the prologue ends in the middle of a source line, continue to
4883 the end of that source line (if it is still within the function).
4884 Otherwise, just go to end of prologue. */
4885 if (stop_func_sal.end
4886 && stop_func_sal.pc != ecs->stop_func_start
4887 && stop_func_sal.end < ecs->stop_func_end)
4888 ecs->stop_func_start = stop_func_sal.end;
4889
4890 /* Architectures which require breakpoint adjustment might not be able
4891 to place a breakpoint at the computed address. If so, the test
4892 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4893 ecs->stop_func_start to an address at which a breakpoint may be
4894 legitimately placed.
4895
4896 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4897 made, GDB will enter an infinite loop when stepping through
4898 optimized code consisting of VLIW instructions which contain
4899 subinstructions corresponding to different source lines. On
4900 FR-V, it's not permitted to place a breakpoint on any but the
4901 first subinstruction of a VLIW instruction. When a breakpoint is
4902 set, GDB will adjust the breakpoint address to the beginning of
4903 the VLIW instruction. Thus, we need to make the corresponding
4904 adjustment here when computing the stop address. */
4905
4906 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4907 {
4908 ecs->stop_func_start
4909 = gdbarch_adjust_breakpoint_address (gdbarch,
4910 ecs->stop_func_start);
4911 }
4912
4913 if (ecs->stop_func_start == stop_pc)
4914 {
4915 /* We are already there: stop now. */
4916 ecs->event_thread->stop_step = 1;
4917 print_end_stepping_range_reason ();
4918 stop_stepping (ecs);
4919 return;
4920 }
4921 else
4922 {
4923 /* Put the step-breakpoint there and go until there. */
4924 init_sal (&sr_sal); /* initialize to zeroes */
4925 sr_sal.pc = ecs->stop_func_start;
4926 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4927 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4928
4929 /* Do not specify what the fp should be when we stop since on
4930 some machines the prologue is where the new fp value is
4931 established. */
4932 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4933
4934 /* And make sure stepping stops right away then. */
4935 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4936 }
4937 keep_going (ecs);
4938 }
4939
4940 /* Inferior has stepped backward into a subroutine call with source
4941 code that we should not step over. Do step to the beginning of the
4942 last line of code in it. */
4943
4944 static void
4945 handle_step_into_function_backward (struct gdbarch *gdbarch,
4946 struct execution_control_state *ecs)
4947 {
4948 struct symtab *s;
4949 struct symtab_and_line stop_func_sal;
4950
4951 s = find_pc_symtab (stop_pc);
4952 if (s && s->language != language_asm)
4953 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4954 ecs->stop_func_start);
4955
4956 stop_func_sal = find_pc_line (stop_pc, 0);
4957
4958 /* OK, we're just going to keep stepping here. */
4959 if (stop_func_sal.pc == stop_pc)
4960 {
4961 /* We're there already. Just stop stepping now. */
4962 ecs->event_thread->stop_step = 1;
4963 print_end_stepping_range_reason ();
4964 stop_stepping (ecs);
4965 }
4966 else
4967 {
4968 /* Else just reset the step range and keep going.
4969 No step-resume breakpoint, they don't work for
4970 epilogues, which can have multiple entry paths. */
4971 ecs->event_thread->step_range_start = stop_func_sal.pc;
4972 ecs->event_thread->step_range_end = stop_func_sal.end;
4973 keep_going (ecs);
4974 }
4975 return;
4976 }
4977
4978 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4979 This is used both to skip over functions and to skip over other code. */
4980
4981 static void
4982 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4983 struct symtab_and_line sr_sal,
4984 struct frame_id sr_id)
4985 {
4986 /* There should never be more than one step-resume or longjmp-resume
4987 breakpoint per thread, so we should never be setting a new
4988 step_resume_breakpoint when one is already active. */
4989 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4990
4991 if (debug_infrun)
4992 fprintf_unfiltered (gdb_stdlog,
4993 "infrun: inserting step-resume breakpoint at %s\n",
4994 paddress (gdbarch, sr_sal.pc));
4995
4996 inferior_thread ()->step_resume_breakpoint
4997 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4998 }
4999
5000 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
5001 to skip a potential signal handler.
5002
5003 This is called with the interrupted function's frame. The signal
5004 handler, when it returns, will resume the interrupted function at
5005 RETURN_FRAME.pc. */
5006
5007 static void
5008 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5009 {
5010 struct symtab_and_line sr_sal;
5011 struct gdbarch *gdbarch;
5012
5013 gdb_assert (return_frame != NULL);
5014 init_sal (&sr_sal); /* initialize to zeros */
5015
5016 gdbarch = get_frame_arch (return_frame);
5017 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5018 sr_sal.section = find_pc_overlay (sr_sal.pc);
5019 sr_sal.pspace = get_frame_program_space (return_frame);
5020
5021 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5022 get_stack_frame_id (return_frame));
5023 }
5024
5025 /* Similar to insert_step_resume_breakpoint_at_frame, except that it
5026 sets the breakpoint at the previous frame's PC. This is used to
5027 skip a function after stepping into it (for "next" or if the called
5028 function has no debugging information).
5029
5030 The current function has almost always been reached by single
5031 stepping a call or return instruction. NEXT_FRAME belongs to the
5032 current function, and the breakpoint will be set at the caller's
5033 resume address.
5034
5035 This is a separate function rather than reusing
5036 insert_step_resume_breakpoint_at_frame in order to avoid
5037 get_prev_frame, which may stop prematurely (see the implementation
5038 of frame_unwind_caller_id for an example). */
5039
5040 static void
5041 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5042 {
5043 struct symtab_and_line sr_sal;
5044 struct gdbarch *gdbarch;
5045
5046 /* We shouldn't have gotten here if we don't know where the call site
5047 is. */
5048 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5049
5050 init_sal (&sr_sal); /* initialize to zeros */
5051
5052 gdbarch = frame_unwind_caller_arch (next_frame);
5053 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5054 frame_unwind_caller_pc (next_frame));
5055 sr_sal.section = find_pc_overlay (sr_sal.pc);
5056 sr_sal.pspace = frame_unwind_program_space (next_frame);
5057
5058 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5059 frame_unwind_caller_id (next_frame));
5060 }
5061
5062 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5063 new breakpoint at the target of a jmp_buf. The handling of
5064 longjmp-resume uses the same mechanisms used for handling
5065 "step-resume" breakpoints. */
5066
5067 static void
5068 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5069 {
5070 /* There should never be more than one step-resume or longjmp-resume
5071 breakpoint per thread, so we should never be setting a new
5072 longjmp_resume_breakpoint when one is already active. */
5073 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5074
5075 if (debug_infrun)
5076 fprintf_unfiltered (gdb_stdlog,
5077 "infrun: inserting longjmp-resume breakpoint at %s\n",
5078 paddress (gdbarch, pc));
5079
5080 inferior_thread ()->step_resume_breakpoint =
5081 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5082 }
5083
5084 static void
5085 stop_stepping (struct execution_control_state *ecs)
5086 {
5087 if (debug_infrun)
5088 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5089
5090 /* Let callers know we don't want to wait for the inferior anymore. */
5091 ecs->wait_some_more = 0;
5092 }
5093
5094 /* This function handles various cases where we need to continue
5095 waiting for the inferior. */
5096 /* (Used to be the keep_going: label in the old wait_for_inferior) */
5097
5098 static void
5099 keep_going (struct execution_control_state *ecs)
5100 {
5101 /* Make sure normal_stop is called if we get a QUIT handled before
5102 reaching resume. */
5103 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5104
5105 /* Save the pc before execution, to compare with pc after stop. */
5106 ecs->event_thread->prev_pc
5107 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5108
5109 /* Getting here means we should keep running the inferior and not
5110 return to the debugger. */
5111
5112 if (ecs->event_thread->trap_expected
5113 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
5114 {
5115 /* We took a signal (which we are supposed to pass through to
5116 the inferior, else we'd not get here) and we haven't yet
5117 gotten our trap. Simply continue. */
5118
5119 discard_cleanups (old_cleanups);
5120 resume (currently_stepping (ecs->event_thread),
5121 ecs->event_thread->stop_signal);
5122 }
5123 else
5124 {
5125 /* Either the trap was not expected, but we are continuing
5126 anyway (the user asked that this signal be passed to the
5127 child)
5128 -- or --
5129 The signal was SIGTRAP, e.g. it was our signal, but we
5130 decided we should resume from it.
5131
5132 We're going to run this baby now!
5133
5134 Note that insert_breakpoints won't try to re-insert
5135 already inserted breakpoints. Therefore, we don't
5136 care if breakpoints were already inserted, or not. */
5137
5138 if (ecs->event_thread->stepping_over_breakpoint)
5139 {
5140 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5141
5142 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5143 /* Since we can't do a displaced step, we have to remove
5144 the breakpoint while we step it. To keep things
5145 simple, we remove them all. */
5146 remove_breakpoints ();
5147 }
5148 else
5149 {
5150 struct gdb_exception e;
5151
5152 /* Stop stepping when inserting breakpoints
5153 has failed. */
5154 TRY_CATCH (e, RETURN_MASK_ERROR)
5155 {
5156 insert_breakpoints ();
5157 }
5158 if (e.reason < 0)
5159 {
5160 exception_print (gdb_stderr, e);
5161 stop_stepping (ecs);
5162 return;
5163 }
5164 }
5165
5166 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
5167
5168 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5169 specifies that such a signal should be delivered to the
5170 target program).
5171
5172 Typically, this would occur when a user is debugging a
5173 target monitor on a simulator: the target monitor sets a
5174 breakpoint; the simulator encounters this breakpoint and
5175 halts the simulation, handing control to GDB; GDB, noting
5176 that the breakpoint isn't valid, returns control back to the
5177 simulator; the simulator then delivers the hardware
5178 equivalent of a SIGNAL_TRAP to the program being debugged. */
5179
5180 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
5181 && !signal_program[ecs->event_thread->stop_signal])
5182 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
5183
5184 discard_cleanups (old_cleanups);
5185 resume (currently_stepping (ecs->event_thread),
5186 ecs->event_thread->stop_signal);
5187 }
5188
5189 prepare_to_wait (ecs);
5190 }
5191
5192 /* This function normally comes after a resume, before
5193 handle_inferior_event exits. It takes care of any last bits of
5194 housekeeping, and sets the all-important wait_some_more flag. */
5195
5196 static void
5197 prepare_to_wait (struct execution_control_state *ecs)
5198 {
5199 if (debug_infrun)
5200 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5201
5202 /* This is the old end of the while loop. Let everybody know we
5203 want to wait for the inferior some more and get called again
5204 soon. */
5205 ecs->wait_some_more = 1;
5206 }
5207
5208 /* Several print_*_reason functions to print why the inferior has stopped.
5209 We always print something when the inferior exits, or receives a signal.
5210 The rest of the cases are dealt with later on in normal_stop and
5211 print_it_typical. Ideally there should be a call to one of these
5212 print_*_reason functions from handle_inferior_event each time
5213 stop_stepping is called. */
5214
5215 /* Print why the inferior has stopped.
5216 We are done with a step/next/si/ni command. For now nothing is printed
5217 on the CLI; an MI reason is emitted only if we are not in the middle
5218 of doing a "step n" operation for n > 1. */
5219
5220 static void
5221 print_end_stepping_range_reason (void)
5222 {
5223 if ((!inferior_thread ()->step_multi || !inferior_thread ()->stop_step)
5224 && ui_out_is_mi_like_p (uiout))
5225 ui_out_field_string (uiout, "reason",
5226 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5227 }
5228
5229 /* The inferior was terminated by a signal, print why it stopped. */
5230
5231 static void
5232 print_signal_exited_reason (enum target_signal siggnal)
5233 {
5234 annotate_signalled ();
5235 if (ui_out_is_mi_like_p (uiout))
5236 ui_out_field_string
5237 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5238 ui_out_text (uiout, "\nProgram terminated with signal ");
5239 annotate_signal_name ();
5240 ui_out_field_string (uiout, "signal-name",
5241 target_signal_to_name (siggnal));
5242 annotate_signal_name_end ();
5243 ui_out_text (uiout, ", ");
5244 annotate_signal_string ();
5245 ui_out_field_string (uiout, "signal-meaning",
5246 target_signal_to_string (siggnal));
5247 annotate_signal_string_end ();
5248 ui_out_text (uiout, ".\n");
5249 ui_out_text (uiout, "The program no longer exists.\n");
5250 }
5251
5252 /* The inferior program is finished, print why it stopped. */
5253
5254 static void
5255 print_exited_reason (int exitstatus)
5256 {
5257 annotate_exited (exitstatus);
5258 if (exitstatus)
5259 {
5260 if (ui_out_is_mi_like_p (uiout))
5261 ui_out_field_string (uiout, "reason",
5262 async_reason_lookup (EXEC_ASYNC_EXITED));
5263 ui_out_text (uiout, "\nProgram exited with code ");
5264 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5265 ui_out_text (uiout, ".\n");
5266 }
5267 else
5268 {
5269 if (ui_out_is_mi_like_p (uiout))
5270 ui_out_field_string
5271 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5272 ui_out_text (uiout, "\nProgram exited normally.\n");
5273 }
5274 /* Support the --return-child-result option. */
5275 return_child_result_value = exitstatus;
5276 }
5277
5278 /* Signal received, print why the inferior has stopped. The signal table
5279 tells us to print about it. */
5280
5281 static void
5282 print_signal_received_reason (enum target_signal siggnal)
5283 {
5284 annotate_signal ();
5285
5286 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5287 {
5288 struct thread_info *t = inferior_thread ();
5289
5290 ui_out_text (uiout, "\n[");
5291 ui_out_field_string (uiout, "thread-name",
5292 target_pid_to_str (t->ptid));
5293 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5294 ui_out_text (uiout, " stopped");
5295 }
5296 else
5297 {
5298 ui_out_text (uiout, "\nProgram received signal ");
5299 annotate_signal_name ();
5300 if (ui_out_is_mi_like_p (uiout))
5301 ui_out_field_string
5302 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5303 ui_out_field_string (uiout, "signal-name",
5304 target_signal_to_name (siggnal));
5305 annotate_signal_name_end ();
5306 ui_out_text (uiout, ", ");
5307 annotate_signal_string ();
5308 ui_out_field_string (uiout, "signal-meaning",
5309 target_signal_to_string (siggnal));
5310 annotate_signal_string_end ();
5311 }
5312 ui_out_text (uiout, ".\n");
5313 }
5314
5315 /* Reverse execution: target ran out of history info, print why the inferior
5316 has stopped. */
5317
5318 static void
5319 print_no_history_reason (void)
5320 {
5321 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5322 }
5323
5324 /* Here to return control to GDB when the inferior stops for real.
5325 Print appropriate messages, remove breakpoints, give terminal our modes.
5326
5327 STOP_PRINT_FRAME nonzero means print the executing frame
5328 (pc, function, args, file, line number and line text).
5329 BREAKPOINTS_FAILED nonzero means stop was due to error
5330 attempting to insert breakpoints. */
5331
5332 void
5333 normal_stop (void)
5334 {
5335 struct target_waitstatus last;
5336 ptid_t last_ptid;
5337 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5338
5339 get_last_target_status (&last_ptid, &last);
5340
5341 /* If an exception is thrown from this point on, make sure to
5342 propagate GDB's knowledge of the executing state to the
5343 frontend/user running state. A QUIT is an easy exception to see
5344 here, so do this before any filtered output. */
5345 if (!non_stop)
5346 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5347 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5348 && last.kind != TARGET_WAITKIND_EXITED)
5349 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5350
5351 /* In non-stop mode, we don't want GDB to switch threads behind the
5352 user's back, to avoid races where the user is typing a command to
5353 apply to thread x, but GDB switches to thread y before the user
5354 finishes entering the command. */
5355
5356 /* As with the notification of thread events, we want to delay
5357 notifying the user that we've switched thread context until
5358 the inferior actually stops.
5359
5360 There's no point in saying anything if the inferior has exited.
5361 Note that SIGNALLED here means "exited with a signal", not
5362 "received a signal". */
5363 if (!non_stop
5364 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5365 && target_has_execution
5366 && last.kind != TARGET_WAITKIND_SIGNALLED
5367 && last.kind != TARGET_WAITKIND_EXITED)
5368 {
5369 target_terminal_ours_for_output ();
5370 printf_filtered (_("[Switching to %s]\n"),
5371 target_pid_to_str (inferior_ptid));
5372 annotate_thread_changed ();
5373 previous_inferior_ptid = inferior_ptid;
5374 }
5375
5376 if (!breakpoints_always_inserted_mode () && target_has_execution)
5377 {
5378 if (remove_breakpoints ())
5379 {
5380 target_terminal_ours_for_output ();
5381 printf_filtered (_("\
5382 Cannot remove breakpoints because program is no longer writable.\n\
5383 Further execution is probably impossible.\n"));
5384 }
5385 }
5386
5387 /* If an auto-display called a function and that got a signal,
5388 delete that auto-display to avoid an infinite recursion. */
5389
5390 if (stopped_by_random_signal)
5391 disable_current_display ();
5392
5393 /* Don't print a message if in the middle of doing a "step n"
5394 operation for n > 1 */
5395 if (target_has_execution
5396 && last.kind != TARGET_WAITKIND_SIGNALLED
5397 && last.kind != TARGET_WAITKIND_EXITED
5398 && inferior_thread ()->step_multi
5399 && inferior_thread ()->stop_step)
5400 goto done;
5401
5402 target_terminal_ours ();
5403
5404 /* Set the current source location. This will also happen if we
5405 display the frame below, but the current SAL will be incorrect
5406 during a user hook-stop function. */
5407 if (has_stack_frames () && !stop_stack_dummy)
5408 set_current_sal_from_frame (get_current_frame (), 1);
5409
5410 /* Let the user/frontend see the threads as stopped. */
5411 do_cleanups (old_chain);
5412
5413 /* Look up the hook_stop and run it (CLI internally handles problem
5414 of stop_command's pre-hook not existing). */
5415 if (stop_command)
5416 catch_errors (hook_stop_stub, stop_command,
5417 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5418
5419 if (!has_stack_frames ())
5420 goto done;
5421
5422 if (last.kind == TARGET_WAITKIND_SIGNALLED
5423 || last.kind == TARGET_WAITKIND_EXITED)
5424 goto done;
5425
5426 /* Select innermost stack frame - i.e., current frame is frame 0,
5427 and current location is based on that.
5428 Don't do this on return from a stack dummy routine,
5429 or if the program has exited. */
5430
5431 if (!stop_stack_dummy)
5432 {
5433 select_frame (get_current_frame ());
5434
5435 /* Print current location without a level number, if
5436 we have changed functions or hit a breakpoint.
5437 Print source line if we have one.
5438 bpstat_print() contains the logic deciding in detail
5439 what to print, based on the event(s) that just occurred. */
5440
5441 /* If --batch-silent is enabled then there's no need to print the current
5442 source location, and trying to do so risks causing an error message about
5443 missing source files. */
5444 if (stop_print_frame && !batch_silent)
5445 {
5446 int bpstat_ret;
5447 int source_flag;
5448 int do_frame_printing = 1;
5449 struct thread_info *tp = inferior_thread ();
5450
5451 bpstat_ret = bpstat_print (tp->stop_bpstat);
5452 switch (bpstat_ret)
5453 {
5454 case PRINT_UNKNOWN:
5455 /* If we had hit a shared library event breakpoint,
5456 bpstat_print would print out this message. If we hit
5457 an OS-level shared library event, do the same
5458 thing. */
5459 if (last.kind == TARGET_WAITKIND_LOADED)
5460 {
5461 printf_filtered (_("Stopped due to shared library event\n"));
5462 source_flag = SRC_LINE; /* something bogus */
5463 do_frame_printing = 0;
5464 break;
5465 }
5466
5467 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5468 (or should) carry around the function and does (or
5469 should) use that when doing a frame comparison. */
5470 if (tp->stop_step
5471 && frame_id_eq (tp->step_frame_id,
5472 get_frame_id (get_current_frame ()))
5473 && step_start_function == find_pc_function (stop_pc))
5474 source_flag = SRC_LINE; /* finished step, just print source line */
5475 else
5476 source_flag = SRC_AND_LOC; /* print location and source line */
5477 break;
5478 case PRINT_SRC_AND_LOC:
5479 source_flag = SRC_AND_LOC; /* print location and source line */
5480 break;
5481 case PRINT_SRC_ONLY:
5482 source_flag = SRC_LINE;
5483 break;
5484 case PRINT_NOTHING:
5485 source_flag = SRC_LINE; /* something bogus */
5486 do_frame_printing = 0;
5487 break;
5488 default:
5489 internal_error (__FILE__, __LINE__, _("Unknown value."));
5490 }
5491
5492 /* The behavior of this routine with respect to the source
5493 flag is:
5494 SRC_LINE: Print only source line
5495 LOCATION: Print only location
5496 SRC_AND_LOC: Print location and source line */
5497 if (do_frame_printing)
5498 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5499
5500 /* Display the auto-display expressions. */
5501 do_displays ();
5502 }
5503 }
5504
5505 /* Save the function value return registers, if we care.
5506 We might be about to restore their previous contents. */
5507 if (inferior_thread ()->proceed_to_finish)
5508 {
5509 /* This should not be necessary. */
5510 if (stop_registers)
5511 regcache_xfree (stop_registers);
5512
5513 /* NB: The copy goes through to the target picking up the value of
5514 all the registers. */
5515 stop_registers = regcache_dup (get_current_regcache ());
5516 }
5517
5518 if (stop_stack_dummy == STOP_STACK_DUMMY)
5519 {
5520 /* Pop the empty frame that contains the stack dummy.
5521 This also restores inferior state prior to the call
5522 (struct inferior_thread_state). */
5523 struct frame_info *frame = get_current_frame ();
5524
5525 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5526 frame_pop (frame);
5527 /* frame_pop() calls reinit_frame_cache as the last thing it does
5528 which means there's currently no selected frame. We don't need
5529 to re-establish a selected frame if the dummy call returns normally,
5530 that will be done by restore_inferior_status. However, we do have
5531 to handle the case where the dummy call is returning after being
5532 stopped (e.g. the dummy call previously hit a breakpoint). We
5533 can't know which case we have so just always re-establish a
5534 selected frame here. */
5535 select_frame (get_current_frame ());
5536 }
5537
5538 done:
5539 annotate_stopped ();
5540
5541 /* Suppress the stop observer if we're in the middle of:
5542
5543 - a step n (n > 1), as there are still more steps to be done.
5544
5545 - a "finish" command, as the observer will be called in
5546 finish_command_continuation, so it can include the inferior
5547 function's return value.
5548
5549 - calling an inferior function, as we pretend the inferior didn't
5550 run at all. The return value of the call is handled by the
5551 expression evaluator, through call_function_by_hand. */
5552
5553 if (!target_has_execution
5554 || last.kind == TARGET_WAITKIND_SIGNALLED
5555 || last.kind == TARGET_WAITKIND_EXITED
5556 || (!inferior_thread ()->step_multi
5557 && !(inferior_thread ()->stop_bpstat
5558 && inferior_thread ()->proceed_to_finish)
5559 && !inferior_thread ()->in_infcall))
5560 {
5561 if (!ptid_equal (inferior_ptid, null_ptid))
5562 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5563 stop_print_frame);
5564 else
5565 observer_notify_normal_stop (NULL, stop_print_frame);
5566 }
5567
5568 if (target_has_execution)
5569 {
5570 if (last.kind != TARGET_WAITKIND_SIGNALLED
5571 && last.kind != TARGET_WAITKIND_EXITED)
5572 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5573 Delete any breakpoint that is to be deleted at the next stop. */
5574 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5575 }
5576
5577 /* Try to get rid of automatically added inferiors that are no
5578 longer needed. Keeping those around slows down things linearly.
5579 Note that this never removes the current inferior. */
5580 prune_inferiors ();
5581 }
5582
5583 static int
5584 hook_stop_stub (void *cmd)
5585 {
5586 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5587 return (0);
5588 }
5589 \f
5590 int
5591 signal_stop_state (int signo)
5592 {
5593 return signal_stop[signo];
5594 }
5595
5596 int
5597 signal_print_state (int signo)
5598 {
5599 return signal_print[signo];
5600 }
5601
5602 int
5603 signal_pass_state (int signo)
5604 {
5605 return signal_program[signo];
5606 }
5607
5608 int
5609 signal_stop_update (int signo, int state)
5610 {
5611 int ret = signal_stop[signo];
5612
5613 signal_stop[signo] = state;
5614 return ret;
5615 }
5616
5617 int
5618 signal_print_update (int signo, int state)
5619 {
5620 int ret = signal_print[signo];
5621
5622 signal_print[signo] = state;
5623 return ret;
5624 }
5625
5626 int
5627 signal_pass_update (int signo, int state)
5628 {
5629 int ret = signal_program[signo];
5630
5631 signal_program[signo] = state;
5632 return ret;
5633 }
5634
5635 static void
5636 sig_print_header (void)
5637 {
5638 printf_filtered (_("\
5639 Signal Stop\tPrint\tPass to program\tDescription\n"));
5640 }
5641
5642 static void
5643 sig_print_info (enum target_signal oursig)
5644 {
5645 const char *name = target_signal_to_name (oursig);
5646 int name_padding = 13 - strlen (name);
5647
5648 if (name_padding <= 0)
5649 name_padding = 0;
5650
5651 printf_filtered ("%s", name);
5652 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5653 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5654 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5655 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5656 printf_filtered ("%s\n", target_signal_to_string (oursig));
5657 }
5658
5659 /* Specify how various signals in the inferior should be handled. */
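
/* Illustrative usage sketch: the parser below accepts signal names,
   GDB's internal signal numbers, or LOW-HIGH ranges, freely mixed with
   the action keywords, e.g.

     (gdb) handle SIGALRM nostop noprint pass
     (gdb) handle SIGUSR1 stop print

   Numeric arguments refer to GDB's own signal numbering (see the
   comment in the digit-parsing branch), not the host's.  */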
5660
5661 static void
5662 handle_command (char *args, int from_tty)
5663 {
5664 char **argv;
5665 int digits, wordlen;
5666 int sigfirst, signum, siglast;
5667 enum target_signal oursig;
5668 int allsigs;
5669 int nsigs;
5670 unsigned char *sigs;
5671 struct cleanup *old_chain;
5672
5673 if (args == NULL)
5674 {
5675 error_no_arg (_("signal to handle"));
5676 }
5677
5678 /* Allocate and zero an array of flags for which signals to handle. */
5679
5680 nsigs = (int) TARGET_SIGNAL_LAST;
5681 sigs = (unsigned char *) alloca (nsigs);
5682 memset (sigs, 0, nsigs);
5683
5684 /* Break the command line up into args. */
5685
5686 argv = gdb_buildargv (args);
5687 old_chain = make_cleanup_freeargv (argv);
5688
5689 /* Walk through the args, looking for signal oursigs, signal names, and
5690 actions. Signal numbers and signal names may be interspersed with
5691 actions, with the actions being performed for all signals cumulatively
5692 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5693
5694 while (*argv != NULL)
5695 {
5696 wordlen = strlen (*argv);
5697 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5698 {;
5699 }
5700 allsigs = 0;
5701 sigfirst = siglast = -1;
5702
5703 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5704 {
5705 /* Apply action to all signals except those used by the
5706 debugger. Silently skip those. */
5707 allsigs = 1;
5708 sigfirst = 0;
5709 siglast = nsigs - 1;
5710 }
5711 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5712 {
5713 SET_SIGS (nsigs, sigs, signal_stop);
5714 SET_SIGS (nsigs, sigs, signal_print);
5715 }
5716 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5717 {
5718 UNSET_SIGS (nsigs, sigs, signal_program);
5719 }
5720 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5721 {
5722 SET_SIGS (nsigs, sigs, signal_print);
5723 }
5724 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5725 {
5726 SET_SIGS (nsigs, sigs, signal_program);
5727 }
5728 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5729 {
5730 UNSET_SIGS (nsigs, sigs, signal_stop);
5731 }
5732 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5733 {
5734 SET_SIGS (nsigs, sigs, signal_program);
5735 }
5736 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5737 {
5738 UNSET_SIGS (nsigs, sigs, signal_print);
5739 UNSET_SIGS (nsigs, sigs, signal_stop);
5740 }
5741 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5742 {
5743 UNSET_SIGS (nsigs, sigs, signal_program);
5744 }
5745 else if (digits > 0)
5746 {
5747 /* It is numeric. The numeric signal refers to our own
5748 internal signal numbering from target.h, not to host/target
5749 signal number. This is a feature; users really should be
5750 using symbolic names anyway, and the common ones like
5751 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5752
5753 sigfirst = siglast = (int)
5754 target_signal_from_command (atoi (*argv));
5755 if ((*argv)[digits] == '-')
5756 {
5757 siglast = (int)
5758 target_signal_from_command (atoi ((*argv) + digits + 1));
5759 }
5760 if (sigfirst > siglast)
5761 {
5762 /* Bet he didn't figure we'd think of this case... */
5763 signum = sigfirst;
5764 sigfirst = siglast;
5765 siglast = signum;
5766 }
5767 }
5768 else
5769 {
5770 oursig = target_signal_from_name (*argv);
5771 if (oursig != TARGET_SIGNAL_UNKNOWN)
5772 {
5773 sigfirst = siglast = (int) oursig;
5774 }
5775 else
5776 {
5777 /* Not a number and not a recognized flag word => complain. */
5778 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5779 }
5780 }
5781
5782 /* If any signal numbers or symbol names were found, set flags for
5783 which signals to apply actions to. */
5784
5785 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5786 {
5787 switch ((enum target_signal) signum)
5788 {
5789 case TARGET_SIGNAL_TRAP:
5790 case TARGET_SIGNAL_INT:
5791 if (!allsigs && !sigs[signum])
5792 {
5793 if (query (_("%s is used by the debugger.\n\
5794 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5795 {
5796 sigs[signum] = 1;
5797 }
5798 else
5799 {
5800 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5801 gdb_flush (gdb_stdout);
5802 }
5803 }
5804 break;
5805 case TARGET_SIGNAL_0:
5806 case TARGET_SIGNAL_DEFAULT:
5807 case TARGET_SIGNAL_UNKNOWN:
5808 /* Make sure that "all" doesn't print these. */
5809 break;
5810 default:
5811 sigs[signum] = 1;
5812 break;
5813 }
5814 }
5815
5816 argv++;
5817 }
5818
5819 for (signum = 0; signum < nsigs; signum++)
5820 if (sigs[signum])
5821 {
5822 target_notice_signals (inferior_ptid);
5823
5824 if (from_tty)
5825 {
5826 /* Show the results. */
5827 sig_print_header ();
5828 for (; signum < nsigs; signum++)
5829 if (sigs[signum])
5830 sig_print_info (signum);
5831 }
5832
5833 break;
5834 }
5835
5836 do_cleanups (old_chain);
5837 }
5838
5839 static void
5840 xdb_handle_command (char *args, int from_tty)
5841 {
5842 char **argv;
5843 struct cleanup *old_chain;
5844
5845 if (args == NULL)
5846 error_no_arg (_("xdb command"));
5847
5848 /* Break the command line up into args. */
5849
5850 argv = gdb_buildargv (args);
5851 old_chain = make_cleanup_freeargv (argv);
5852 if (argv[1] != (char *) NULL)
5853 {
5854 char *argBuf;
5855 int bufLen;
5856
5857 bufLen = strlen (argv[0]) + 20;
5858 argBuf = (char *) xmalloc (bufLen);
5859 if (argBuf)
5860 {
5861 int validFlag = 1;
5862 enum target_signal oursig;
5863
5864 oursig = target_signal_from_name (argv[0]);
5865 memset (argBuf, 0, bufLen);
5866 if (strcmp (argv[1], "Q") == 0)
5867 sprintf (argBuf, "%s %s", argv[0], "noprint");
5868 else
5869 {
5870 if (strcmp (argv[1], "s") == 0)
5871 {
5872 if (!signal_stop[oursig])
5873 sprintf (argBuf, "%s %s", argv[0], "stop");
5874 else
5875 sprintf (argBuf, "%s %s", argv[0], "nostop");
5876 }
5877 else if (strcmp (argv[1], "i") == 0)
5878 {
5879 if (!signal_program[oursig])
5880 sprintf (argBuf, "%s %s", argv[0], "pass");
5881 else
5882 sprintf (argBuf, "%s %s", argv[0], "nopass");
5883 }
5884 else if (strcmp (argv[1], "r") == 0)
5885 {
5886 if (!signal_print[oursig])
5887 sprintf (argBuf, "%s %s", argv[0], "print");
5888 else
5889 sprintf (argBuf, "%s %s", argv[0], "noprint");
5890 }
5891 else
5892 validFlag = 0;
5893 }
5894 if (validFlag)
5895 handle_command (argBuf, from_tty);
5896 else
5897 printf_filtered (_("Invalid signal handling flag.\n"));
5898 if (argBuf)
5899 xfree (argBuf);
5900 }
5901 }
5902 do_cleanups (old_chain);
5903 }
5904
5905 /* Print current contents of the tables set by the handle command.
5906 It is possible we should just be printing signals actually used
5907 by the current target (but for things to work right when switching
5908 targets, all signals should be in the signal tables). */
5909
5910 static void
5911 signals_info (char *signum_exp, int from_tty)
5912 {
5913 enum target_signal oursig;
5914
5915 sig_print_header ();
5916
5917 if (signum_exp)
5918 {
5919 /* First see if this is a symbol name. */
5920 oursig = target_signal_from_name (signum_exp);
5921 if (oursig == TARGET_SIGNAL_UNKNOWN)
5922 {
5923 /* No, try numeric. */
5924 oursig =
5925 target_signal_from_command (parse_and_eval_long (signum_exp));
5926 }
5927 sig_print_info (oursig);
5928 return;
5929 }
5930
5931 printf_filtered ("\n");
5932 /* These ugly casts brought to you by the native VAX compiler. */
5933 for (oursig = TARGET_SIGNAL_FIRST;
5934 (int) oursig < (int) TARGET_SIGNAL_LAST;
5935 oursig = (enum target_signal) ((int) oursig + 1))
5936 {
5937 QUIT;
5938
5939 if (oursig != TARGET_SIGNAL_UNKNOWN
5940 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5941 sig_print_info (oursig);
5942 }
5943
5944 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5945 }
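
/* For example, "info signals" with no argument prints the whole table
   above, while "info signals SIGSEGV" (or "info handle SIGSEGV", via
   the alias registered in _initialize_infrun) prints a single row.  */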
5946
5947 /* The $_siginfo convenience variable is a bit special. We don't know
5948 for sure the type of the value until we actually have a chance to
5949 fetch the data. The type can change depending on gdbarch, so it is
5950 also dependent on which thread you have selected. This is handled by:
5951
5952 1. making $_siginfo be an internalvar that creates a new value on
5953 access.
5954
5955 2. making the value of $_siginfo be an lval_computed value. */
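
/* For example, after the inferior stops with a signal, "print $_siginfo"
   shows the extra signal information, with a layout given by the
   gdbarch's siginfo type (so the available fields vary by target).  */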
5956
5957 /* This function implements the lval_computed support for reading a
5958 $_siginfo value. */
5959
5960 static void
5961 siginfo_value_read (struct value *v)
5962 {
5963 LONGEST transferred;
5964
5965 transferred =
5966 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5967 NULL,
5968 value_contents_all_raw (v),
5969 value_offset (v),
5970 TYPE_LENGTH (value_type (v)));
5971
5972 if (transferred != TYPE_LENGTH (value_type (v)))
5973 error (_("Unable to read siginfo"));
5974 }
5975
5976 /* This function implements the lval_computed support for writing a
5977 $_siginfo value. */
5978
5979 static void
5980 siginfo_value_write (struct value *v, struct value *fromval)
5981 {
5982 LONGEST transferred;
5983
5984 transferred = target_write (&current_target,
5985 TARGET_OBJECT_SIGNAL_INFO,
5986 NULL,
5987 value_contents_all_raw (fromval),
5988 value_offset (v),
5989 TYPE_LENGTH (value_type (fromval)));
5990
5991 if (transferred != TYPE_LENGTH (value_type (fromval)))
5992 error (_("Unable to write siginfo"));
5993 }
5994
5995 static struct lval_funcs siginfo_value_funcs =
5996 {
5997 siginfo_value_read,
5998 siginfo_value_write
5999 };
6000
6001 /* Return a new value with the correct type for the siginfo object of
6002 the current thread using architecture GDBARCH. Return a void value
6003 if there's no object available. */
6004
6005 static struct value *
6006 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6007 {
6008 if (target_has_stack
6009 && !ptid_equal (inferior_ptid, null_ptid)
6010 && gdbarch_get_siginfo_type_p (gdbarch))
6011 {
6012 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6013
6014 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6015 }
6016
6017 return allocate_value (builtin_type (gdbarch)->builtin_void);
6018 }
6019
6020 \f
6021 /* Inferior thread state.
6022 These are details related to the inferior itself, and don't include
6023 things like what frame the user had selected or what gdb was doing
6024 with the target at the time.
6025 For inferior function calls these are things we want to restore
6026 regardless of whether the function call successfully completes
6027 or the dummy frame has to be manually popped. */
6028
6029 struct inferior_thread_state
6030 {
6031 enum target_signal stop_signal;
6032 CORE_ADDR stop_pc;
6033 struct regcache *registers;
6034 };
6035
6036 struct inferior_thread_state *
6037 save_inferior_thread_state (void)
6038 {
6039 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
6040 struct thread_info *tp = inferior_thread ();
6041
6042 inf_state->stop_signal = tp->stop_signal;
6043 inf_state->stop_pc = stop_pc;
6044
6045 inf_state->registers = regcache_dup (get_current_regcache ());
6046
6047 return inf_state;
6048 }
6049
6050 /* Restore inferior session state to INF_STATE. */
6051
6052 void
6053 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6054 {
6055 struct thread_info *tp = inferior_thread ();
6056
6057 tp->stop_signal = inf_state->stop_signal;
6058 stop_pc = inf_state->stop_pc;
6059
6060 /* The inferior can be gone if the user types "print exit(0)"
6061 (and perhaps other times). */
6062 if (target_has_execution)
6063 /* NB: The register write goes through to the target. */
6064 regcache_cpy (get_current_regcache (), inf_state->registers);
6065 regcache_xfree (inf_state->registers);
6066 xfree (inf_state);
6067 }
6068
6069 static void
6070 do_restore_inferior_thread_state_cleanup (void *state)
6071 {
6072 restore_inferior_thread_state (state);
6073 }
6074
6075 struct cleanup *
6076 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6077 {
6078 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
6079 }
6080
6081 void
6082 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
6083 {
6084 regcache_xfree (inf_state->registers);
6085 xfree (inf_state);
6086 }
6087
6088 struct regcache *
6089 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
6090 {
6091 return inf_state->registers;
6092 }
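
/* A minimal usage sketch (assuming the caller runs an inferior function
   call, as infcall-style code does): save the state, arrange for the
   cleanup to restore it on error, and discard both once the call has
   completed normally.

     struct inferior_thread_state *st = save_inferior_thread_state ();
     struct cleanup *restore = make_cleanup_restore_inferior_thread_state (st);
     ... run the dummy-frame call ...
     discard_cleanups (restore);
     discard_inferior_thread_state (st);
*/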
6093
6094 /* Session related state for inferior function calls.
6095 These are the additional bits of state that need to be restored
6096 when an inferior function call successfully completes. */
6097
6098 struct inferior_status
6099 {
6100 bpstat stop_bpstat;
6101 int stop_step;
6102 enum stop_stack_kind stop_stack_dummy;
6103 int stopped_by_random_signal;
6104 int stepping_over_breakpoint;
6105 CORE_ADDR step_range_start;
6106 CORE_ADDR step_range_end;
6107 struct frame_id step_frame_id;
6108 struct frame_id step_stack_frame_id;
6109 enum step_over_calls_kind step_over_calls;
6110 CORE_ADDR step_resume_break_address;
6111 int stop_after_trap;
6112 int stop_soon;
6113
6114 /* ID of the selected frame when the inferior function call was made. */
6115 struct frame_id selected_frame_id;
6116
6117 int proceed_to_finish;
6118 int in_infcall;
6119 };
6120
6121 /* Save all of the information associated with the inferior<==>gdb
6122 connection. */
6123
6124 struct inferior_status *
6125 save_inferior_status (void)
6126 {
6127 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
6128 struct thread_info *tp = inferior_thread ();
6129 struct inferior *inf = current_inferior ();
6130
6131 inf_status->stop_step = tp->stop_step;
6132 inf_status->stop_stack_dummy = stop_stack_dummy;
6133 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6134 inf_status->stepping_over_breakpoint = tp->trap_expected;
6135 inf_status->step_range_start = tp->step_range_start;
6136 inf_status->step_range_end = tp->step_range_end;
6137 inf_status->step_frame_id = tp->step_frame_id;
6138 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
6139 inf_status->step_over_calls = tp->step_over_calls;
6140 inf_status->stop_after_trap = stop_after_trap;
6141 inf_status->stop_soon = inf->stop_soon;
6142 /* Save original bpstat chain here; replace it with copy of chain.
6143 If caller's caller is walking the chain, they'll be happier if we
6144 hand them back the original chain when restore_inferior_status is
6145 called. */
6146 inf_status->stop_bpstat = tp->stop_bpstat;
6147 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
6148 inf_status->proceed_to_finish = tp->proceed_to_finish;
6149 inf_status->in_infcall = tp->in_infcall;
6150
6151 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6152
6153 return inf_status;
6154 }
6155
6156 static int
6157 restore_selected_frame (void *args)
6158 {
6159 struct frame_id *fid = (struct frame_id *) args;
6160 struct frame_info *frame;
6161
6162 frame = frame_find_by_id (*fid);
6163
6164 /* If FRAME is NULL, the previously selected frame could not be found
6165 again; warn and let the caller pick a frame instead. */
6166 if (frame == NULL)
6167 {
6168 warning (_("Unable to restore previously selected frame."));
6169 return 0;
6170 }
6171
6172 select_frame (frame);
6173
6174 return (1);
6175 }
6176
6177 /* Restore inferior session state to INF_STATUS. */
6178
6179 void
6180 restore_inferior_status (struct inferior_status *inf_status)
6181 {
6182 struct thread_info *tp = inferior_thread ();
6183 struct inferior *inf = current_inferior ();
6184
6185 tp->stop_step = inf_status->stop_step;
6186 stop_stack_dummy = inf_status->stop_stack_dummy;
6187 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6188 tp->trap_expected = inf_status->stepping_over_breakpoint;
6189 tp->step_range_start = inf_status->step_range_start;
6190 tp->step_range_end = inf_status->step_range_end;
6191 tp->step_frame_id = inf_status->step_frame_id;
6192 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
6193 tp->step_over_calls = inf_status->step_over_calls;
6194 stop_after_trap = inf_status->stop_after_trap;
6195 inf->stop_soon = inf_status->stop_soon;
6196 bpstat_clear (&tp->stop_bpstat);
6197 tp->stop_bpstat = inf_status->stop_bpstat;
6198 inf_status->stop_bpstat = NULL;
6199 tp->proceed_to_finish = inf_status->proceed_to_finish;
6200 tp->in_infcall = inf_status->in_infcall;
6201
6202 if (target_has_stack)
6203 {
6204 /* The point of catch_errors is that if the stack is clobbered,
6205 walking the stack might encounter a garbage pointer and
6206 error() trying to dereference it. */
6207 if (catch_errors
6208 (restore_selected_frame, &inf_status->selected_frame_id,
6209 "Unable to restore previously selected frame:\n",
6210 RETURN_MASK_ERROR) == 0)
6211 /* Error in restoring the selected frame. Select the innermost
6212 frame. */
6213 select_frame (get_current_frame ());
6214 }
6215
6216 xfree (inf_status);
6217 }
6218
6219 static void
6220 do_restore_inferior_status_cleanup (void *sts)
6221 {
6222 restore_inferior_status (sts);
6223 }
6224
6225 struct cleanup *
6226 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
6227 {
6228 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
6229 }
6230
6231 void
6232 discard_inferior_status (struct inferior_status *inf_status)
6233 {
6234 /* See save_inferior_status for info on stop_bpstat. */
6235 bpstat_clear (&inf_status->stop_bpstat);
6236 xfree (inf_status);
6237 }
6238 \f
6239 int
6240 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6241 {
6242 struct target_waitstatus last;
6243 ptid_t last_ptid;
6244
6245 get_last_target_status (&last_ptid, &last);
6246
6247 if (last.kind != TARGET_WAITKIND_FORKED)
6248 return 0;
6249
6250 if (!ptid_equal (last_ptid, pid))
6251 return 0;
6252
6253 *child_pid = last.value.related_pid;
6254 return 1;
6255 }
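
/* Illustrative caller sketch: code that reacts to fork events (for
   instance, fork catchpoints) can ask whether the last wait reported a
   fork for a given ptid and, if so, pick up the child's ptid:

     ptid_t child;
     if (inferior_has_forked (inferior_ptid, &child))
       ... act on CHILD ...
*/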
6256
6257 int
6258 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6259 {
6260 struct target_waitstatus last;
6261 ptid_t last_ptid;
6262
6263 get_last_target_status (&last_ptid, &last);
6264
6265 if (last.kind != TARGET_WAITKIND_VFORKED)
6266 return 0;
6267
6268 if (!ptid_equal (last_ptid, pid))
6269 return 0;
6270
6271 *child_pid = last.value.related_pid;
6272 return 1;
6273 }
6274
6275 int
6276 inferior_has_execd (ptid_t pid, char **execd_pathname)
6277 {
6278 struct target_waitstatus last;
6279 ptid_t last_ptid;
6280
6281 get_last_target_status (&last_ptid, &last);
6282
6283 if (last.kind != TARGET_WAITKIND_EXECD)
6284 return 0;
6285
6286 if (!ptid_equal (last_ptid, pid))
6287 return 0;
6288
6289 *execd_pathname = xstrdup (last.value.execd_pathname);
6290 return 1;
6291 }
6292
6293 int
6294 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6295 {
6296 struct target_waitstatus last;
6297 ptid_t last_ptid;
6298
6299 get_last_target_status (&last_ptid, &last);
6300
6301 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY &&
6302 last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6303 return 0;
6304
6305 if (!ptid_equal (last_ptid, pid))
6306 return 0;
6307
6308 *syscall_number = last.value.syscall_number;
6309 return 1;
6310 }
6311
6312 /* Oft used ptids */
6313 ptid_t null_ptid;
6314 ptid_t minus_one_ptid;
6315
6316 /* Create a ptid given the necessary PID, LWP, and TID components. */
6317
6318 ptid_t
6319 ptid_build (int pid, long lwp, long tid)
6320 {
6321 ptid_t ptid;
6322
6323 ptid.pid = pid;
6324 ptid.lwp = lwp;
6325 ptid.tid = tid;
6326 return ptid;
6327 }
6328
6329 /* Create a ptid from just a pid. */
6330
6331 ptid_t
6332 pid_to_ptid (int pid)
6333 {
6334 return ptid_build (pid, 0, 0);
6335 }
6336
6337 /* Fetch the pid (process id) component from a ptid. */
6338
6339 int
6340 ptid_get_pid (ptid_t ptid)
6341 {
6342 return ptid.pid;
6343 }
6344
6345 /* Fetch the lwp (lightweight process) component from a ptid. */
6346
6347 long
6348 ptid_get_lwp (ptid_t ptid)
6349 {
6350 return ptid.lwp;
6351 }
6352
6353 /* Fetch the tid (thread id) component from a ptid. */
6354
6355 long
6356 ptid_get_tid (ptid_t ptid)
6357 {
6358 return ptid.tid;
6359 }
6360
6361 /* ptid_equal() is used to test equality of two ptids. */
6362
6363 int
6364 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6365 {
6366 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6367 && ptid1.tid == ptid2.tid);
6368 }
6369
6370 /* Returns true if PTID represents a process. */
6371
6372 int
6373 ptid_is_pid (ptid_t ptid)
6374 {
6375 if (ptid_equal (minus_one_ptid, ptid))
6376 return 0;
6377 if (ptid_equal (null_ptid, ptid))
6378 return 0;
6379
6380 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6381 }
6382
6383 int
6384 ptid_match (ptid_t ptid, ptid_t filter)
6385 {
6386 /* Since both parameters have the same type, prevent easy mistakes
6387 from happening. */
6388 gdb_assert (!ptid_equal (ptid, minus_one_ptid)
6389 && !ptid_equal (ptid, null_ptid));
6390
6391 if (ptid_equal (filter, minus_one_ptid))
6392 return 1;
6393 if (ptid_is_pid (filter)
6394 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6395 return 1;
6396 else if (ptid_equal (ptid, filter))
6397 return 1;
6398
6399 return 0;
6400 }
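
/* Illustrative sketch: a filter built with pid_to_ptid matches every
   thread of that process, while minus_one_ptid matches any thread at
   all (PID and EVENT_PTID below stand for whatever the caller has at
   hand).

     ptid_t filter = pid_to_ptid (pid);
     if (ptid_match (event_ptid, filter))
       ... EVENT_PTID belongs to process PID ...
*/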
6401
6402 /* restore_inferior_ptid() will be used by the cleanup machinery
6403 to restore the inferior_ptid value saved in a call to
6404 save_inferior_ptid(). */
6405
6406 static void
6407 restore_inferior_ptid (void *arg)
6408 {
6409 ptid_t *saved_ptid_ptr = arg;
6410
6411 inferior_ptid = *saved_ptid_ptr;
6412 xfree (arg);
6413 }
6414
6415 /* Save the value of inferior_ptid so that it may be restored by a
6416 later call to do_cleanups(). Returns the struct cleanup pointer
6417 needed for later doing the cleanup. */
6418
6419 struct cleanup *
6420 save_inferior_ptid (void)
6421 {
6422 ptid_t *saved_ptid_ptr;
6423
6424 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6425 *saved_ptid_ptr = inferior_ptid;
6426 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6427 }
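
/* Typical pattern (sketch): temporarily switch inferior_ptid and let
   the cleanup put the original value back.

     struct cleanup *old_chain = save_inferior_ptid ();
     inferior_ptid = some_other_ptid;   (placeholder for the caller's ptid)
     ... operate on that thread ...
     do_cleanups (old_chain);
*/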
6428 \f
6429
6430 /* User interface for reverse debugging:
6431 Set exec-direction / show exec-direction commands
6432 (returns an error unless the target supports reverse execution). */
6433
6434 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6435 static const char exec_forward[] = "forward";
6436 static const char exec_reverse[] = "reverse";
6437 static const char *exec_direction = exec_forward;
6438 static const char *exec_direction_names[] = {
6439 exec_forward,
6440 exec_reverse,
6441 NULL
6442 };
6443
6444 static void
6445 set_exec_direction_func (char *args, int from_tty,
6446 struct cmd_list_element *cmd)
6447 {
6448 if (target_can_execute_reverse)
6449 {
6450 if (!strcmp (exec_direction, exec_forward))
6451 execution_direction = EXEC_FORWARD;
6452 else if (!strcmp (exec_direction, exec_reverse))
6453 execution_direction = EXEC_REVERSE;
6454 }
6455 else
6456 {
6457 exec_direction = exec_forward;
6458 error (_("Target does not support this operation."));
6459 }
6460 }
6461
6462 static void
6463 show_exec_direction_func (struct ui_file *out, int from_tty,
6464 struct cmd_list_element *cmd, const char *value)
6465 {
6466 switch (execution_direction) {
6467 case EXEC_FORWARD:
6468 fprintf_filtered (out, _("Forward.\n"));
6469 break;
6470 case EXEC_REVERSE:
6471 fprintf_filtered (out, _("Reverse.\n"));
6472 break;
6473 case EXEC_ERROR:
6474 default:
6475 fprintf_filtered (out,
6476 _("Forward (target `%s' does not support exec-direction).\n"),
6477 target_shortname);
6478 break;
6479 }
6480 }
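
/* For example, on a target that can execute in reverse (such as the
   record target), "set exec-direction reverse" makes subsequent step
   and continue commands run the program backwards, and
   "set exec-direction forward" restores the default behavior.  */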
6481
6482 /* User interface for non-stop mode. */
6483
6484 int non_stop = 0;
6485
6486 static void
6487 set_non_stop (char *args, int from_tty,
6488 struct cmd_list_element *c)
6489 {
6490 if (target_has_execution)
6491 {
6492 non_stop_1 = non_stop;
6493 error (_("Cannot change this setting while the inferior is running."));
6494 }
6495
6496 non_stop = non_stop_1;
6497 }
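
/* Note that the mode can only be changed before the program is running:
   set_non_stop above rejects the change (and reverts the pending
   setting) once the target has execution.  */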
6498
6499 static void
6500 show_non_stop (struct ui_file *file, int from_tty,
6501 struct cmd_list_element *c, const char *value)
6502 {
6503 fprintf_filtered (file,
6504 _("Controlling the inferior in non-stop mode is %s.\n"),
6505 value);
6506 }
6507
6508 static void
6509 show_schedule_multiple (struct ui_file *file, int from_tty,
6510 struct cmd_list_element *c, const char *value)
6511 {
6512 fprintf_filtered (file, _("\
6513 Resuming the execution of threads of all processes is %s.\n"), value);
6514 }
6515
6516 void
6517 _initialize_infrun (void)
6518 {
6519 int i;
6520 int numsigs;
6521
6522 add_info ("signals", signals_info, _("\
6523 What debugger does when program gets various signals.\n\
6524 Specify a signal as argument to print info on that signal only."));
6525 add_info_alias ("handle", "signals", 0);
6526
6527 add_com ("handle", class_run, handle_command, _("\
6528 Specify how to handle a signal.\n\
6529 Args are signals and actions to apply to those signals.\n\
6530 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6531 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6532 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6533 The special arg \"all\" is recognized to mean all signals except those\n\
6534 used by the debugger, typically SIGTRAP and SIGINT.\n\
6535 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6536 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6537 Stop means reenter debugger if this signal happens (implies print).\n\
6538 Print means print a message if this signal happens.\n\
6539 Pass means let program see this signal; otherwise program doesn't know.\n\
6540 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6541 Pass and Stop may be combined."));
6542 if (xdb_commands)
6543 {
6544 add_com ("lz", class_info, signals_info, _("\
6545 What debugger does when program gets various signals.\n\
6546 Specify a signal as argument to print info on that signal only."));
6547 add_com ("z", class_run, xdb_handle_command, _("\
6548 Specify how to handle a signal.\n\
6549 Args are signals and actions to apply to those signals.\n\
6550 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6551 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6552 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6553 The special arg \"all\" is recognized to mean all signals except those\n\
6554 used by the debugger, typically SIGTRAP and SIGINT.\n\
6555 Recognized actions include \"s\" (toggles between stop and nostop),\n\
6556 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6557 nopass), \"Q\" (noprint)\n\
6558 Stop means reenter debugger if this signal happens (implies print).\n\
6559 Print means print a message if this signal happens.\n\
6560 Pass means let program see this signal; otherwise program doesn't know.\n\
6561 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6562 Pass and Stop may be combined."));
6563 }
6564
6565 if (!dbx_commands)
6566 stop_command = add_cmd ("stop", class_obscure,
6567 not_just_help_class_command, _("\
6568 There is no `stop' command, but you can set a hook on `stop'.\n\
6569 This allows you to set a list of commands to be run each time execution\n\
6570 of the program stops."), &cmdlist);
6571
6572 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6573 Set inferior debugging."), _("\
6574 Show inferior debugging."), _("\
6575 When non-zero, inferior specific debugging is enabled."),
6576 NULL,
6577 show_debug_infrun,
6578 &setdebuglist, &showdebuglist);
6579
6580 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6581 Set displaced stepping debugging."), _("\
6582 Show displaced stepping debugging."), _("\
6583 When non-zero, displaced stepping specific debugging is enabled."),
6584 NULL,
6585 show_debug_displaced,
6586 &setdebuglist, &showdebuglist);
6587
6588 add_setshow_boolean_cmd ("non-stop", no_class,
6589 &non_stop_1, _("\
6590 Set whether gdb controls the inferior in non-stop mode."), _("\
6591 Show whether gdb controls the inferior in non-stop mode."), _("\
6592 When debugging a multi-threaded program and this setting is\n\
6593 off (the default, also called all-stop mode), when one thread stops\n\
6594 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6595 all other threads in the program while you interact with the thread of\n\
6596 interest. When you continue or step a thread, you can allow the other\n\
6597 threads to run, or have them remain stopped, but while you inspect any\n\
6598 thread's state, all threads stop.\n\
6599 \n\
6600 In non-stop mode, when one thread stops, other threads can continue\n\
6601 to run freely. You'll be able to step each thread independently,\n\
6602 leave it stopped or free to run as needed."),
6603 set_non_stop,
6604 show_non_stop,
6605 &setlist,
6606 &showlist);
6607
6608 numsigs = (int) TARGET_SIGNAL_LAST;
6609 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6610 signal_print = (unsigned char *)
6611 xmalloc (sizeof (signal_print[0]) * numsigs);
6612 signal_program = (unsigned char *)
6613 xmalloc (sizeof (signal_program[0]) * numsigs);
6614 for (i = 0; i < numsigs; i++)
6615 {
6616 signal_stop[i] = 1;
6617 signal_print[i] = 1;
6618 signal_program[i] = 1;
6619 }
6620
6621 /* Signals caused by debugger's own actions
6622 should not be given to the program afterwards. */
6623 signal_program[TARGET_SIGNAL_TRAP] = 0;
6624 signal_program[TARGET_SIGNAL_INT] = 0;
6625
6626 /* Signals that are not errors should not normally enter the debugger. */
6627 signal_stop[TARGET_SIGNAL_ALRM] = 0;
6628 signal_print[TARGET_SIGNAL_ALRM] = 0;
6629 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6630 signal_print[TARGET_SIGNAL_VTALRM] = 0;
6631 signal_stop[TARGET_SIGNAL_PROF] = 0;
6632 signal_print[TARGET_SIGNAL_PROF] = 0;
6633 signal_stop[TARGET_SIGNAL_CHLD] = 0;
6634 signal_print[TARGET_SIGNAL_CHLD] = 0;
6635 signal_stop[TARGET_SIGNAL_IO] = 0;
6636 signal_print[TARGET_SIGNAL_IO] = 0;
6637 signal_stop[TARGET_SIGNAL_POLL] = 0;
6638 signal_print[TARGET_SIGNAL_POLL] = 0;
6639 signal_stop[TARGET_SIGNAL_URG] = 0;
6640 signal_print[TARGET_SIGNAL_URG] = 0;
6641 signal_stop[TARGET_SIGNAL_WINCH] = 0;
6642 signal_print[TARGET_SIGNAL_WINCH] = 0;
6643
6644 /* These signals are used internally by user-level thread
6645 implementations. (See signal(5) on Solaris.) Like the above
6646 signals, a healthy program receives and handles them as part of
6647 its normal operation. */
6648 signal_stop[TARGET_SIGNAL_LWP] = 0;
6649 signal_print[TARGET_SIGNAL_LWP] = 0;
6650 signal_stop[TARGET_SIGNAL_WAITING] = 0;
6651 signal_print[TARGET_SIGNAL_WAITING] = 0;
6652 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6653 signal_print[TARGET_SIGNAL_CANCEL] = 0;
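/* These per-signal defaults are what the user later adjusts with the
   "handle" command, for example
     (gdb) handle SIGALRM stop print
     (gdb) info signals SIGALRM
   which is expected to flip signal_stop[TARGET_SIGNAL_ALRM] and
   signal_print[TARGET_SIGNAL_ALRM] back on.  */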
6654
6655 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6656 &stop_on_solib_events, _("\
6657 Set stopping for shared library events."), _("\
6658 Show stopping for shared library events."), _("\
6659 If nonzero, gdb will give control to the user when the dynamic linker\n\
6660 notifies gdb of shared library events. The most common event of interest\n\
6661 to the user would be the loading or unloading of a library."),
6662 NULL,
6663 show_stop_on_solib_events,
6664 &setlist, &showlist);
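/* Illustrative usage:
     (gdb) set stop-on-solib-events 1
   returns control to the user at each dynamic-linker notification, e.g.
   around dlopen/dlclose calls, on targets whose dynamic linker reports
   such events.  */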
6665
6666 add_setshow_enum_cmd ("follow-fork-mode", class_run,
6667 follow_fork_mode_kind_names,
6668 &follow_fork_mode_string, _("\
6669 Set debugger response to a program call of fork or vfork."), _("\
6670 Show debugger response to a program call of fork or vfork."), _("\
6671 A fork or vfork creates a new process. follow-fork-mode can be:\n\
6672 parent - the original process is debugged after a fork\n\
6673 child - the new process is debugged after a fork\n\
6674 The unfollowed process will continue to run.\n\
6675 By default, the debugger will follow the parent process."),
6676 NULL,
6677 show_follow_fork_mode_string,
6678 &setlist, &showlist);
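/* Illustrative usage: to debug the child of the next fork instead of
   the parent,
     (gdb) set follow-fork-mode child
   often combined with "set detach-on-fork off" (registered further
   down) to keep both processes under GDB's control.  */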
6679
6680 add_setshow_enum_cmd ("follow-exec-mode", class_run,
6681 follow_exec_mode_names,
6682 &follow_exec_mode_string, _("\
6683 Set debugger response to a program call of exec."), _("\
6684 Show debugger response to a program call of exec."), _("\
6685 An exec call replaces the program image of a process.\n\
6686 \n\
6687 follow-exec-mode can be:\n\
6688 \n\
6689 new - the debugger creates a new inferior and rebinds the process\n\
6690 to this new inferior. The program the process was running before\n\
6691 the exec call can be restarted afterwards by restarting the original\n\
6692 inferior.\n\
6693 \n\
6694 same - the debugger keeps the process bound to the same inferior.\n\
6695 The new executable image replaces the previous executable loaded in\n\
6696 the inferior. Restarting the inferior after the exec call restarts\n\
6697 the new executable image rather than the one running beforehand.\n\
6698 \n\
6699 By default, the debugger will use the same inferior."),
6700 NULL,
6701 show_follow_exec_mode_string,
6702 &setlist, &showlist);
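/* Illustrative usage:
     (gdb) set follow-exec-mode new
   so that a later "run" in the original inferior restarts the pre-exec
   program rather than the image installed by the exec call.  */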
6703
6704 add_setshow_enum_cmd ("scheduler-locking", class_run,
6705 scheduler_enums, &scheduler_mode, _("\
6706 Set mode for locking scheduler during execution."), _("\
6707 Show mode for locking scheduler during execution."), _("\
6708 off == no locking (threads may preempt at any time)\n\
6709 on == full locking (no thread except the current thread may run)\n\
6710 step == scheduler locked during every single-step operation.\n\
6711 In this mode, no other thread may run during a step command.\n\
6712 Other threads may run while stepping over a function call ('next')."),
6713 set_schedlock_func, /* traps on target vector */
6714 show_scheduler_mode,
6715 &setlist, &showlist);
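/* Illustrative usage: to keep other threads frozen while
   single-stepping the current one,
     (gdb) set scheduler-locking step
   ("on" freezes them for every execution command, not just stepping).  */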
6716
6717 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
6718 Set mode for resuming threads of all processes."), _("\
6719 Show mode for resuming threads of all processes."), _("\
6720 When on, execution commands (such as 'continue' or 'next') resume all\n\
6721 threads of all processes. When off (which is the default), execution\n\
6722 commands only resume the threads of the current process. The set of\n\
6723 threads that are resumed is further refined by the scheduler-locking\n\
6724 mode (see help set scheduler-locking)."),
6725 NULL,
6726 show_schedule_multiple,
6727 &setlist, &showlist);
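/* Illustrative usage, relevant when several inferiors are being
   debugged at once:
     (gdb) set schedule-multiple on
   lets "continue" resume threads of every process rather than only the
   current one.  */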
6728
6729 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
6730 Set mode of the step operation."), _("\
6731 Show mode of the step operation."), _("\
6732 When set, doing a step over a function without debug line information\n\
6733 will stop at the first instruction of that function. Otherwise, the\n\
6734 function is skipped and the step command stops at a different source line."),
6735 NULL,
6736 show_step_stop_if_no_debug,
6737 &setlist, &showlist);
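/* Illustrative usage:
     (gdb) set step-mode on
   after which "step" into a function built without line information
   stops at that function's first instruction instead of skipping it.  */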
6738
6739 add_setshow_enum_cmd ("displaced-stepping", class_run,
6740 can_use_displaced_stepping_enum,
6741 &can_use_displaced_stepping, _("\
6742 Set debugger's willingness to use displaced stepping."), _("\
6743 Show debugger's willingness to use displaced stepping."), _("\
6744 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
6745 supported by the target architecture. If off, gdb will not use displaced\n\
6746 stepping to step over breakpoints, even if the target architecture\n\
6747 supports it. If auto (which is the default), gdb will use displaced stepping\n\
6748 if the target architecture supports it and non-stop mode is active, but will not\n\
6749 use it in all-stop mode (see help set non-stop)."),
6750 NULL,
6751 show_can_use_displaced_stepping,
6752 &setlist, &showlist);
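/* Illustrative usage: displaced stepping can be forced on even in
   all-stop mode with
     (gdb) set displaced-stepping on
   provided the target architecture implements it; "auto" is the default
   behavior described in the help text above.  */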
6753
6754 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
6755 &exec_direction, _("Set direction of execution.\n\
6756 Options are 'forward' or 'reverse'."),
6757 _("Show direction of execution (forward/reverse)."),
6758 _("Tells gdb whether to execute forward or backward."),
6759 set_exec_direction_func, show_exec_direction_func,
6760 &setlist, &showlist);
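/* Illustrative usage, assuming a target that supports reverse
   execution (for instance process record):
     (gdb) record
     (gdb) set exec-direction reverse
     (gdb) continue
   The reverse-* commands have the same effect without changing this
   setting.  */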
6761
6762 /* Set/show detach-on-fork: user-settable mode. */
6763
6764 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
6765 Set whether gdb will detach the child of a fork."), _("\
6766 Show whether gdb will detach the child of a fork."), _("\
6767 Tells gdb whether to detach the child of a fork."),
6768 NULL, NULL, &setlist, &showlist);
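/* Illustrative usage:
     (gdb) set detach-on-fork off
   keeps the unfollowed side of a fork attached (held suspended) so it
   can be debugged later through the inferior commands.  */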
6769
6770 /* ptid initializations */
6771 null_ptid = ptid_build (0, 0, 0);
6772 minus_one_ptid = ptid_build (-1, 0, 0);
6773 inferior_ptid = null_ptid;
6774 target_last_wait_ptid = minus_one_ptid;
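/* Conventions assumed by the initializations above: null_ptid stands
   for "no thread", while minus_one_ptid is the wildcard meaning every
   thread of every process; e.g. a hedged sketch of a full resume is
   target_resume (minus_one_ptid, 0, TARGET_SIGNAL_0).  */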
6775
6776 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
6777 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
6778 observer_attach_thread_exit (infrun_thread_thread_exit);
6779 observer_attach_inferior_exit (infrun_inferior_exit);
6780
6781 /* Explicitly create without lookup, since lookup would try to create
6782 a value with a void type, and when we get here, gdbarch isn't
6783 initialized yet. At this point, we're quite sure there isn't
6784 another convenience variable of the same name. */
6785 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
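/* Illustrative usage: on targets that expose extra signal information,
   the convenience variable created above can be inspected with
     (gdb) print $_siginfo
   and, where the target supports it, assigned to in order to alter the
   signal the inferior will see.  */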
6786
6787 add_setshow_boolean_cmd ("observer", no_class,
6788 &observer_mode_1, _("\
6789 Set whether gdb controls the inferior in observer mode."), _("\
6790 Show whether gdb controls the inferior in observer mode."), _("\
6791 In observer mode, GDB can get data from the inferior, but not\n\
6792 affect its execution. Registers and memory may not be changed,\n\
6793 breakpoints may not be set, and the program cannot be interrupted\n\
6794 or signalled."),
6795 set_observer_mode,
6796 show_observer_mode,
6797 &setlist,
6798 &showlist);
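/* Illustrative usage:
     (gdb) set observer on
   which is expected to also force the various "may-*" permission flags
   off so the inferior's execution cannot be perturbed.  */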
6799 }