gdb/infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009, 2010 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53 #include "jit.h"
54 #include "tracepoint.h"
55
56 /* Prototypes for local functions */
57
58 static void signals_info (char *, int);
59
60 static void handle_command (char *, int);
61
62 static void sig_print_info (enum target_signal);
63
64 static void sig_print_header (void);
65
66 static void resume_cleanups (void *);
67
68 static int hook_stop_stub (void *);
69
70 static int restore_selected_frame (void *);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 static void print_exited_reason (int exitstatus);
87
88 static void print_signal_exited_reason (enum target_signal siggnal);
89
90 static void print_no_history_reason (void);
91
92 static void print_signal_received_reason (enum target_signal siggnal);
93
94 static void print_end_stepping_range_reason (void);
95
96 void _initialize_infrun (void);
97
98 void nullify_last_target_wait_ptid (void);
99
100 /* When set, stop the 'step' command if we enter a function which has
101 no line number information. The normal behavior is that we step
102 over such functions. */
103 int step_stop_if_no_debug = 0;
104 static void
105 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
106 struct cmd_list_element *c, const char *value)
107 {
108 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
109 }
110
111 /* In asynchronous mode, but simulating synchronous execution. */
112
113 int sync_execution = 0;
114
115 /* wait_for_inferior and normal_stop use this to notify the user
116 when the inferior stopped in a different thread than it had been
117 running in. */
118
119 static ptid_t previous_inferior_ptid;
120
121 /* Default behavior is to detach newly forked processes (legacy). */
122 int detach_fork = 1;
123
124 int debug_displaced = 0;
125 static void
126 show_debug_displaced (struct ui_file *file, int from_tty,
127 struct cmd_list_element *c, const char *value)
128 {
129 fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
130 }
131
132 int debug_infrun = 0;
133 static void
134 show_debug_infrun (struct ui_file *file, int from_tty,
135 struct cmd_list_element *c, const char *value)
136 {
137 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
138 }
139
140 /* If the program uses ELF-style shared libraries, then calls to
141 functions in shared libraries go through stubs, which live in a
142 table called the PLT (Procedure Linkage Table). The first time the
143 function is called, the stub sends control to the dynamic linker,
144 which looks up the function's real address, patches the stub so
145 that future calls will go directly to the function, and then passes
146 control to the function.
147
148 If we are stepping at the source level, we don't want to see any of
149 this --- we just want to skip over the stub and the dynamic linker.
150 The simple approach is to single-step until control leaves the
151 dynamic linker.
152
153 However, on some systems (e.g., Red Hat's 5.2 distribution) the
154 dynamic linker calls functions in the shared C library, so you
155 can't tell from the PC alone whether the dynamic linker is still
156 running. In this case, we use a step-resume breakpoint to get us
157 past the dynamic linker, as if we were using "next" to step over a
158 function call.
159
160 in_solib_dynsym_resolve_code() says whether we're in the dynamic
161 linker code or not. Normally, this means we single-step. However,
162 if SKIP_SOLIB_RESOLVER returns non-zero, then its value is an
163 address where we can place a step-resume breakpoint to get past the
164 linker's symbol resolution function.
165
166 in_solib_dynsym_resolve_code() can generally be implemented in a
167 pretty portable way, by comparing the PC against the address ranges
168 of the dynamic linker's sections.
169
170 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
171 it depends on internal details of the dynamic linker. It's usually
172 not too hard to figure out where to put a breakpoint, but it
173 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
174 sanity checking. If it can't figure things out, returning zero and
175 getting the (possibly confusing) stepping behavior is better than
176 signalling an error, which will obscure the change in the
177 inferior's state. */
178
179 /* This function returns TRUE if pc is the address of an instruction
180 that lies within the dynamic linker (such as the event hook, or the
181 dld itself).
182
183 This function must be used only when a dynamic linker event has
184 been caught, and the inferior is being stepped out of the hook, or
185 undefined results are guaranteed. */
186
187 #ifndef SOLIB_IN_DYNAMIC_LINKER
188 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
189 #endif
190
191 /* "Observer mode" is somewhat like a more extreme version of
192 non-stop, in which all GDB operations that might affect the
193 target's execution have been disabled. */
194
195 static int non_stop_1 = 0;
196
197 int observer_mode = 0;
198 static int observer_mode_1 = 0;
199
200 static void
201 set_observer_mode (char *args, int from_tty,
202 struct cmd_list_element *c)
203 {
204 extern int pagination_enabled;
205
206 if (target_has_execution)
207 {
208 observer_mode_1 = observer_mode;
209 error (_("Cannot change this setting while the inferior is running."));
210 }
211
212 observer_mode = observer_mode_1;
213
214 may_write_registers = !observer_mode;
215 may_write_memory = !observer_mode;
216 may_insert_breakpoints = !observer_mode;
217 may_insert_tracepoints = !observer_mode;
218 /* We can insert fast tracepoints in or out of observer mode,
219 but enable them if we're going into this mode. */
220 if (observer_mode)
221 may_insert_fast_tracepoints = 1;
222 may_stop = !observer_mode;
223 update_target_permissions ();
224
225 /* Going *into* observer mode we must force non-stop, then
226 going out we leave it that way. */
227 if (observer_mode)
228 {
229 target_async_permitted = 1;
230 pagination_enabled = 0;
231 non_stop = non_stop_1 = 1;
232 }
233
234 if (from_tty)
235 printf_filtered (_("Observer mode is now %s.\n"),
236 (observer_mode ? "on" : "off"));
237 }
238
239 static void
240 show_observer_mode (struct ui_file *file, int from_tty,
241 struct cmd_list_element *c, const char *value)
242 {
243 fprintf_filtered (file, _("Observer mode is %s.\n"), value);
244 }
245
246 /* This updates the value of observer mode based on changes in
247 permissions. Note that we are deliberately ignoring the values of
248 may-write-registers and may-write-memory, since the user may have
249 reason to enable these during a session, for instance to turn on a
250 debugging-related global. */
251
252 void
253 update_observer_mode (void)
254 {
255 int newval;
256
257 newval = (!may_insert_breakpoints
258 && !may_insert_tracepoints
259 && may_insert_fast_tracepoints
260 && !may_stop
261 && non_stop);
262
263 /* Let the user know if things change. */
264 if (newval != observer_mode)
265 printf_filtered (_("Observer mode is now %s.\n"),
266 (newval ? "on" : "off"));
267
268 observer_mode = observer_mode_1 = newval;
269 }
270
271 /* Tables of how to react to signals; the user sets them. */
272
273 static unsigned char *signal_stop;
274 static unsigned char *signal_print;
275 static unsigned char *signal_program;
276
277 #define SET_SIGS(nsigs,sigs,flags) \
278 do { \
279 int signum = (nsigs); \
280 while (signum-- > 0) \
281 if ((sigs)[signum]) \
282 (flags)[signum] = 1; \
283 } while (0)
284
285 #define UNSET_SIGS(nsigs,sigs,flags) \
286 do { \
287 int signum = (nsigs); \
288 while (signum-- > 0) \
289 if ((sigs)[signum]) \
290 (flags)[signum] = 0; \
291 } while (0)
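/* Editor's illustrative sketch (not part of the original file): the
   SET_SIGS/UNSET_SIGS helpers are driven from a mask of signal numbers,
   much as the "handle" command machinery does later in this file.  It
   assumes the signal_stop/signal_print/signal_program arrays have
   already been allocated.  Guarded out so it has no effect on the
   build.  */
#if 0
static void
example_stop_and_print_sigint (void)
{
  unsigned char sigs[(int) TARGET_SIGNAL_LAST] = { 0 };

  sigs[TARGET_SIGNAL_INT] = 1;
  /* Stop and announce SIGINT, but do not pass it to the program.  */
  SET_SIGS ((int) TARGET_SIGNAL_LAST, sigs, signal_stop);
  SET_SIGS ((int) TARGET_SIGNAL_LAST, sigs, signal_print);
  UNSET_SIGS ((int) TARGET_SIGNAL_LAST, sigs, signal_program);
}
#endif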
292
293 /* Value to pass to target_resume() to cause all threads to resume */
294
295 #define RESUME_ALL minus_one_ptid
296
297 /* Command list pointer for the "stop" placeholder. */
298
299 static struct cmd_list_element *stop_command;
300
301 /* Function inferior was in as of last step command. */
302
303 static struct symbol *step_start_function;
304
305 /* Nonzero if we want to give control to the user when we're notified
306 of shared library events by the dynamic linker. */
307 int stop_on_solib_events;
308 static void
309 show_stop_on_solib_events (struct ui_file *file, int from_tty,
310 struct cmd_list_element *c, const char *value)
311 {
312 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
313 value);
314 }
315
316 /* Nonzero means we are expecting a trace trap
317 and should stop the inferior and return silently when it happens. */
318
319 int stop_after_trap;
320
321 /* Save register contents here when executing a "finish" command or when
322 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
323 Thus this contains the return value from the called function (assuming
324 values are returned in a register). */
325
326 struct regcache *stop_registers;
327
328 /* Nonzero after stop if current stack frame should be printed. */
329
330 static int stop_print_frame;
331
332 /* This is a cached copy of the pid/waitstatus of the last event
333 returned by target_wait()/deprecated_target_wait_hook(). This
334 information is returned by get_last_target_status(). */
335 static ptid_t target_last_wait_ptid;
336 static struct target_waitstatus target_last_waitstatus;
337
338 static void context_switch (ptid_t ptid);
339
340 void init_thread_stepping_state (struct thread_info *tss);
341
342 void init_infwait_state (void);
343
344 static const char follow_fork_mode_child[] = "child";
345 static const char follow_fork_mode_parent[] = "parent";
346
347 static const char *follow_fork_mode_kind_names[] = {
348 follow_fork_mode_child,
349 follow_fork_mode_parent,
350 NULL
351 };
352
353 static const char *follow_fork_mode_string = follow_fork_mode_parent;
354 static void
355 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
356 struct cmd_list_element *c, const char *value)
357 {
358 fprintf_filtered (file, _("\
359 Debugger response to a program call of fork or vfork is \"%s\".\n"),
360 value);
361 }
362 \f
363
364 /* Tell the target to follow the fork we're stopped at. Returns true
365 if the inferior should be resumed; false, if the target for some
366 reason decided it's best not to resume. */
367
368 static int
369 follow_fork (void)
370 {
371 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
372 int should_resume = 1;
373 struct thread_info *tp;
374
375 /* Copy user stepping state to the new inferior thread. FIXME: the
376 followed fork child thread should have a copy of most of the
377 parent thread structure's run control related fields, not just these.
378 Initialized to avoid "may be used uninitialized" warnings from gcc. */
379 struct breakpoint *step_resume_breakpoint = NULL;
380 CORE_ADDR step_range_start = 0;
381 CORE_ADDR step_range_end = 0;
382 struct frame_id step_frame_id = { 0 };
383
384 if (!non_stop)
385 {
386 ptid_t wait_ptid;
387 struct target_waitstatus wait_status;
388
389 /* Get the last target status returned by target_wait(). */
390 get_last_target_status (&wait_ptid, &wait_status);
391
392 /* If not stopped at a fork event, then there's nothing else to
393 do. */
394 if (wait_status.kind != TARGET_WAITKIND_FORKED
395 && wait_status.kind != TARGET_WAITKIND_VFORKED)
396 return 1;
397
398 /* Check if we switched over from WAIT_PTID, since the event was
399 reported. */
400 if (!ptid_equal (wait_ptid, minus_one_ptid)
401 && !ptid_equal (inferior_ptid, wait_ptid))
402 {
403 /* We did. Switch back to WAIT_PTID thread, to tell the
404 target to follow it (in either direction). We'll
405 afterwards refuse to resume, and inform the user what
406 happened. */
407 switch_to_thread (wait_ptid);
408 should_resume = 0;
409 }
410 }
411
412 tp = inferior_thread ();
413
414 /* If there were any forks/vforks that were caught and are now to be
415 followed, then do so now. */
416 switch (tp->pending_follow.kind)
417 {
418 case TARGET_WAITKIND_FORKED:
419 case TARGET_WAITKIND_VFORKED:
420 {
421 ptid_t parent, child;
422
423 /* If the user did a next/step, etc, over a fork call,
424 preserve the stepping state in the fork child. */
425 if (follow_child && should_resume)
426 {
427 step_resume_breakpoint
428 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
429 step_range_start = tp->step_range_start;
430 step_range_end = tp->step_range_end;
431 step_frame_id = tp->step_frame_id;
432
433 /* For now, delete the parent's sr breakpoint; otherwise,
434 parent/child sr breakpoints are considered duplicates,
435 and the child version will not be installed. Remove
436 this when the breakpoints module becomes aware of
437 inferiors and address spaces. */
438 delete_step_resume_breakpoint (tp);
439 tp->step_range_start = 0;
440 tp->step_range_end = 0;
441 tp->step_frame_id = null_frame_id;
442 }
443
444 parent = inferior_ptid;
445 child = tp->pending_follow.value.related_pid;
446
447 /* Tell the target to do whatever is necessary to follow
448 either parent or child. */
449 if (target_follow_fork (follow_child))
450 {
451 /* Target refused to follow, or there's some other reason
452 we shouldn't resume. */
453 should_resume = 0;
454 }
455 else
456 {
457 /* This pending follow fork event is now handled, one way
458 or another. The previously selected thread may be gone
459 from the lists by now, but if it is still around, we need
460 to clear the pending follow request. */
461 tp = find_thread_ptid (parent);
462 if (tp)
463 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
464
465 /* This makes sure we don't try to apply the "Switched
466 over from WAIT_PTID" logic above. */
467 nullify_last_target_wait_ptid ();
468
469 /* If we followed the child, switch to it... */
470 if (follow_child)
471 {
472 switch_to_thread (child);
473
474 /* ... and preserve the stepping state, in case the
475 user was stepping over the fork call. */
476 if (should_resume)
477 {
478 tp = inferior_thread ();
479 tp->step_resume_breakpoint = step_resume_breakpoint;
480 tp->step_range_start = step_range_start;
481 tp->step_range_end = step_range_end;
482 tp->step_frame_id = step_frame_id;
483 }
484 else
485 {
486 /* If we get here, it was because we're trying to
487 resume from a fork catchpoint, but the user
488 has switched threads away from the thread that
489 forked. In that case, the resume command
490 issued is most likely not applicable to the
491 child, so just warn, and refuse to resume. */
492 warning (_("\
493 Not resuming: switched threads before following fork child.\n"));
494 }
495
496 /* Reset breakpoints in the child as appropriate. */
497 follow_inferior_reset_breakpoints ();
498 }
499 else
500 switch_to_thread (parent);
501 }
502 }
503 break;
504 case TARGET_WAITKIND_SPURIOUS:
505 /* Nothing to follow. */
506 break;
507 default:
508 internal_error (__FILE__, __LINE__,
509 "Unexpected pending_follow.kind %d\n",
510 tp->pending_follow.kind);
511 break;
512 }
513
514 return should_resume;
515 }
516
517 void
518 follow_inferior_reset_breakpoints (void)
519 {
520 struct thread_info *tp = inferior_thread ();
521
522 /* Was there a step_resume breakpoint? (There was if the user
523 did a "next" at the fork() call.) If so, explicitly reset its
524 thread number.
525
526 step_resumes are a form of bp that are made to be per-thread.
527 Since we created the step_resume bp when the parent process
528 was being debugged, and now are switching to the child process,
529 from the breakpoint package's viewpoint, that's a switch of
530 "threads". We must update the bp's notion of which thread
531 it is for, or it'll be ignored when it triggers. */
532
533 if (tp->step_resume_breakpoint)
534 breakpoint_re_set_thread (tp->step_resume_breakpoint);
535
536 /* Reinsert all breakpoints in the child. The user may have set
537 breakpoints after catching the fork, in which case those
538 were never set in the child, but only in the parent. This makes
539 sure the inserted breakpoints match the breakpoint list. */
540
541 breakpoint_re_set ();
542 insert_breakpoints ();
543 }
544
545 /* The child has exited or execed: resume threads of the parent the
546 user wanted to be executing. */
547
548 static int
549 proceed_after_vfork_done (struct thread_info *thread,
550 void *arg)
551 {
552 int pid = * (int *) arg;
553
554 if (ptid_get_pid (thread->ptid) == pid
555 && is_running (thread->ptid)
556 && !is_executing (thread->ptid)
557 && !thread->stop_requested
558 && thread->stop_signal == TARGET_SIGNAL_0)
559 {
560 if (debug_infrun)
561 fprintf_unfiltered (gdb_stdlog,
562 "infrun: resuming vfork parent thread %s\n",
563 target_pid_to_str (thread->ptid));
564
565 switch_to_thread (thread->ptid);
566 clear_proceed_status ();
567 proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
568 }
569
570 return 0;
571 }
572
573 /* Called whenever we notice an exec or exit event, to handle
574 detaching or resuming a vfork parent. */
575
576 static void
577 handle_vfork_child_exec_or_exit (int exec)
578 {
579 struct inferior *inf = current_inferior ();
580
581 if (inf->vfork_parent)
582 {
583 int resume_parent = -1;
584
585 /* This exec or exit marks the end of the shared memory region
586 between the parent and the child. If the user wanted to
587 detach from the parent, now is the time. */
588
589 if (inf->vfork_parent->pending_detach)
590 {
591 struct thread_info *tp;
592 struct cleanup *old_chain;
593 struct program_space *pspace;
594 struct address_space *aspace;
595
596 /* follow-fork child, detach-on-fork on */
597
598 old_chain = make_cleanup_restore_current_thread ();
599
600 /* We're letting go of the parent. */
601 tp = any_live_thread_of_process (inf->vfork_parent->pid);
602 switch_to_thread (tp->ptid);
603
604 /* We're about to detach from the parent, which implicitly
605 removes breakpoints from its address space. There's a
606 catch here: we want to reuse the spaces for the child,
607 but parent/child are still sharing the pspace at this
608 point, although the exec in reality makes the kernel give
609 the child a fresh set of new pages. The problem here is
610 that the breakpoints module, being unaware of this, would
611 likely choose the child process to write to the parent
612 address space. Swapping the child temporarily away from
613 the spaces has the desired effect. Yes, this is "sort
614 of" a hack. */
615
616 pspace = inf->pspace;
617 aspace = inf->aspace;
618 inf->aspace = NULL;
619 inf->pspace = NULL;
620
621 if (debug_infrun || info_verbose)
622 {
623 target_terminal_ours ();
624
625 if (exec)
626 fprintf_filtered (gdb_stdlog,
627 "Detaching vfork parent process %d after child exec.\n",
628 inf->vfork_parent->pid);
629 else
630 fprintf_filtered (gdb_stdlog,
631 "Detaching vfork parent process %d after child exit.\n",
632 inf->vfork_parent->pid);
633 }
634
635 target_detach (NULL, 0);
636
637 /* Put it back. */
638 inf->pspace = pspace;
639 inf->aspace = aspace;
640
641 do_cleanups (old_chain);
642 }
643 else if (exec)
644 {
645 /* We're staying attached to the parent, so, really give the
646 child a new address space. */
647 inf->pspace = add_program_space (maybe_new_address_space ());
648 inf->aspace = inf->pspace->aspace;
649 inf->removable = 1;
650 set_current_program_space (inf->pspace);
651
652 resume_parent = inf->vfork_parent->pid;
653
654 /* Break the bonds. */
655 inf->vfork_parent->vfork_child = NULL;
656 }
657 else
658 {
659 struct cleanup *old_chain;
660 struct program_space *pspace;
661
662 /* If this is a vfork child exiting, then the pspace and
663 aspaces were shared with the parent. Since we're
664 reporting the process exit, we'll be mourning all that is
665 found in the address space, and switching to null_ptid,
666 preparing to start a new inferior. But, since we don't
667 want to clobber the parent's address/program spaces, we
668 go ahead and create a new one for this exiting
669 inferior. */
670
671 /* Switch to null_ptid, so that clone_program_space doesn't want
672 to read the selected frame of a dead process. */
673 old_chain = save_inferior_ptid ();
674 inferior_ptid = null_ptid;
675
676 /* This inferior is dead, so avoid giving the breakpoints
677 module the option to write through to it (cloning a
678 program space resets breakpoints). */
679 inf->aspace = NULL;
680 inf->pspace = NULL;
681 pspace = add_program_space (maybe_new_address_space ());
682 set_current_program_space (pspace);
683 inf->removable = 1;
684 clone_program_space (pspace, inf->vfork_parent->pspace);
685 inf->pspace = pspace;
686 inf->aspace = pspace->aspace;
687
688 /* Put back inferior_ptid. We'll continue mourning this
689 inferior. */
690 do_cleanups (old_chain);
691
692 resume_parent = inf->vfork_parent->pid;
693 /* Break the bonds. */
694 inf->vfork_parent->vfork_child = NULL;
695 }
696
697 inf->vfork_parent = NULL;
698
699 gdb_assert (current_program_space == inf->pspace);
700
701 if (non_stop && resume_parent != -1)
702 {
703 /* If the user wanted the parent to be running, let it go
704 free now. */
705 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
706
707 if (debug_infrun)
708 fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
709 resume_parent);
710
711 iterate_over_threads (proceed_after_vfork_done, &resume_parent);
712
713 do_cleanups (old_chain);
714 }
715 }
716 }
717
718 /* Enum strings for "set|show follow-exec-mode". */
719
720 static const char follow_exec_mode_new[] = "new";
721 static const char follow_exec_mode_same[] = "same";
722 static const char *follow_exec_mode_names[] =
723 {
724 follow_exec_mode_new,
725 follow_exec_mode_same,
726 NULL,
727 };
728
729 static const char *follow_exec_mode_string = follow_exec_mode_same;
730 static void
731 show_follow_exec_mode_string (struct ui_file *file, int from_tty,
732 struct cmd_list_element *c, const char *value)
733 {
734 fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
735 }
736
737 /* EXECD_PATHNAME is assumed to be non-NULL. */
738
739 static void
740 follow_exec (ptid_t pid, char *execd_pathname)
741 {
742 struct thread_info *th = inferior_thread ();
743 struct inferior *inf = current_inferior ();
744
745 /* This is an exec event that we actually wish to pay attention to.
746 Refresh our symbol table to the newly exec'd program, remove any
747 momentary bp's, etc.
748
749 If there are breakpoints, they aren't really inserted now,
750 since the exec() transformed our inferior into a fresh set
751 of instructions.
752
753 We want to preserve symbolic breakpoints on the list, since
754 we have hopes that they can be reset after the new a.out's
755 symbol table is read.
756
757 However, any "raw" breakpoints must be removed from the list
758 (e.g., the solib bp's), since their address is probably invalid
759 now.
760
761 And, we DON'T want to call delete_breakpoints() here, since
762 that may write the bp's "shadow contents" (the instruction
763 value that was overwritten with a TRAP instruction). Since
764 we now have a new a.out, those shadow contents aren't valid. */
765
766 mark_breakpoints_out ();
767
768 update_breakpoints_after_exec ();
769
770 /* If there was one, it's gone now. We cannot truly step-to-next
771 statement through an exec(). */
772 th->step_resume_breakpoint = NULL;
773 th->step_range_start = 0;
774 th->step_range_end = 0;
775
776 /* The target reports the exec event to the main thread, even if
777 some other thread does the exec, and even if the main thread was
778 already stopped --- if debugging in non-stop mode, it's possible
779 the user had the main thread held stopped in the previous image
780 --- release it now. This is the same behavior as step-over-exec
781 with scheduler-locking on in all-stop mode. */
782 th->stop_requested = 0;
783
784 /* What is this a.out's name? */
785 printf_unfiltered (_("%s is executing new program: %s\n"),
786 target_pid_to_str (inferior_ptid),
787 execd_pathname);
788
789 /* We've followed the inferior through an exec. Therefore, the
790 inferior has essentially been killed & reborn. */
791
792 gdb_flush (gdb_stdout);
793
794 breakpoint_init_inferior (inf_execd);
795
796 if (gdb_sysroot && *gdb_sysroot)
797 {
798 char *name = alloca (strlen (gdb_sysroot)
799 + strlen (execd_pathname)
800 + 1);
801
802 strcpy (name, gdb_sysroot);
803 strcat (name, execd_pathname);
804 execd_pathname = name;
805 }
806
807 /* Reset the shared library package. This ensures that we get a
808 shlib event when the child reaches "_start", at which point the
809 dld will have had a chance to initialize the child. */
810 /* Also, loading a symbol file below may trigger symbol lookups, and
811 we don't want those to be satisfied by the libraries of the
812 previous incarnation of this process. */
813 no_shared_libraries (NULL, 0);
814
815 if (follow_exec_mode_string == follow_exec_mode_new)
816 {
817 struct program_space *pspace;
818
819 /* The user wants to keep the old inferior and program spaces
820 around. Create a new fresh one, and switch to it. */
821
822 inf = add_inferior (current_inferior ()->pid);
823 pspace = add_program_space (maybe_new_address_space ());
824 inf->pspace = pspace;
825 inf->aspace = pspace->aspace;
826
827 exit_inferior_num_silent (current_inferior ()->num);
828
829 set_current_inferior (inf);
830 set_current_program_space (pspace);
831 }
832
833 gdb_assert (current_program_space == inf->pspace);
834
835 /* That a.out is now the one to use. */
836 exec_file_attach (execd_pathname, 0);
837
838 /* SYMFILE_DEFER_BP_RESET is used here because the proper displacement
839 for a PIE (Position Independent Executable) main symbol file will only
840 get applied by solib_create_inferior_hook below. breakpoint_re_set
841 would fail to insert the breakpoints with the zero displacement. */
842
843 symbol_file_add (execd_pathname, SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET,
844 NULL, 0);
845
846 set_initial_language ();
847
848 #ifdef SOLIB_CREATE_INFERIOR_HOOK
849 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
850 #else
851 solib_create_inferior_hook (0);
852 #endif
853
854 jit_inferior_created_hook ();
855
856 breakpoint_re_set ();
857
858 /* Reinsert all breakpoints. (Those which were symbolic have
859 been reset to the proper address in the new a.out, thanks
860 to symbol_file_command...) */
861 insert_breakpoints ();
862
863 /* The next resume of this inferior should bring it to the shlib
864 startup breakpoints. (If the user had also set bp's on
865 "main" from the old (parent) process, then they'll auto-
866 matically get reset there in the new process.) */
867 }
868
869 /* Non-zero if we are just simulating a single-step. This is needed
870 because we cannot remove the breakpoints in the inferior process
871 until after the `wait' in `wait_for_inferior'. */
872 static int singlestep_breakpoints_inserted_p = 0;
873
874 /* The thread we inserted single-step breakpoints for. */
875 static ptid_t singlestep_ptid;
876
877 /* PC when we started this single-step. */
878 static CORE_ADDR singlestep_pc;
879
880 /* If another thread hit the singlestep breakpoint, we save the original
881 thread here so that we can resume single-stepping it later. */
882 static ptid_t saved_singlestep_ptid;
883 static int stepping_past_singlestep_breakpoint;
884
885 /* If not equal to null_ptid, this means that after the step over a
886 breakpoint finishes, we need to switch to deferred_step_ptid, and step it.
887
888 The use case is when one thread has hit a breakpoint, and then the user
889 has switched to another thread and issued 'step'. We need to step over
890 the breakpoint in the thread which hit it, but then continue
891 stepping the thread user has selected. */
892 static ptid_t deferred_step_ptid;
893 \f
894 /* Displaced stepping. */
895
896 /* In non-stop debugging mode, we must take special care to manage
897 breakpoints properly; in particular, the traditional strategy for
898 stepping a thread past a breakpoint it has hit is unsuitable.
899 'Displaced stepping' is a tactic for stepping one thread past a
900 breakpoint it has hit while ensuring that other threads running
901 concurrently will hit the breakpoint as they should.
902
903 The traditional way to step a thread T off a breakpoint in a
904 multi-threaded program in all-stop mode is as follows:
905
906 a0) Initially, all threads are stopped, and breakpoints are not
907 inserted.
908 a1) We single-step T, leaving breakpoints uninserted.
909 a2) We insert breakpoints, and resume all threads.
910
911 In non-stop debugging, however, this strategy is unsuitable: we
912 don't want to have to stop all threads in the system in order to
913 continue or step T past a breakpoint. Instead, we use displaced
914 stepping:
915
916 n0) Initially, T is stopped, other threads are running, and
917 breakpoints are inserted.
918 n1) We copy the instruction "under" the breakpoint to a separate
919 location, outside the main code stream, making any adjustments
920 to the instruction, register, and memory state as directed by
921 T's architecture.
922 n2) We single-step T over the instruction at its new location.
923 n3) We adjust the resulting register and memory state as directed
924 by T's architecture. This includes resetting T's PC to point
925 back into the main instruction stream.
926 n4) We resume T.
927
928 This approach depends on the following gdbarch methods:
929
930 - gdbarch_max_insn_length and gdbarch_displaced_step_location
931 indicate where to copy the instruction, and how much space must
932 be reserved there. We use these in step n1.
933
934 - gdbarch_displaced_step_copy_insn copies an instruction to a new
935 address, and makes any necessary adjustments to the instruction,
936 register contents, and memory. We use this in step n1.
937
938 - gdbarch_displaced_step_fixup adjusts registers and memory after
939 we have successfully single-stepped the instruction, to yield the
940 same effect the instruction would have had if we had executed it
941 at its original address. We use this in step n3.
942
943 - gdbarch_displaced_step_free_closure provides cleanup.
944
945 The gdbarch_displaced_step_copy_insn and
946 gdbarch_displaced_step_fixup functions must be written so that
947 copying an instruction with gdbarch_displaced_step_copy_insn,
948 single-stepping across the copied instruction, and then applying
949 gdbarch_displaced_step_fixup should have the same effects on the
950 thread's memory and registers as stepping the instruction in place
951 would have. Exactly which responsibilities fall to the copy and
952 which fall to the fixup is up to the author of those functions.
953
954 See the comments in gdbarch.sh for details.
955
956 Note that displaced stepping and software single-step cannot
957 currently be used in combination, although with some care I think
958 they could be made to. Software single-step works by placing
959 breakpoints on all possible subsequent instructions; if the
960 displaced instruction is a PC-relative jump, those breakpoints
961 could fall in very strange places --- on pages that aren't
962 executable, or at addresses that are not proper instruction
963 boundaries. (We do generally let other threads run while we wait
964 to hit the software single-step breakpoint, and they might
965 encounter such a corrupted instruction.) One way to work around
966 this would be to have gdbarch_displaced_step_copy_insn fully
967 simulate the effect of PC-relative instructions (and return NULL)
968 on architectures that use software single-stepping.
969
970 In non-stop mode, we can have independent and simultaneous step
971 requests, so more than one thread may need to simultaneously step
972 over a breakpoint. The current implementation assumes there is
973 only one scratch space per process. In this case, we have to
974 serialize access to the scratch space. If thread A wants to step
975 over a breakpoint, but we are currently waiting for some other
976 thread to complete a displaced step, we leave thread A stopped and
977 place it in the displaced_step_request_queue. Whenever a displaced
978 step finishes, we pick the next thread in the queue and start a new
979 displaced step operation on it. See displaced_step_prepare and
980 displaced_step_fixup for details. */
981
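/* Editor's illustrative sketch (not part of the original file): the
   n0-n4 sequence above maps onto the gdbarch hooks roughly as follows,
   mirroring displaced_step_prepare and displaced_step_fixup further
   below.  Guarded out; error handling, scratch-area save/restore and
   request queueing are omitted.  */
#if 0
static void
example_displaced_step_one_thread (ptid_t ptid)
{
  struct regcache *regcache = get_thread_regcache (ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original = regcache_read_pc (regcache);
  CORE_ADDR copy = gdbarch_displaced_step_location (gdbarch);    /* n1 */
  struct displaced_step_closure *closure
    = gdbarch_displaced_step_copy_insn (gdbarch, original, copy, regcache);

  regcache_write_pc (regcache, copy);
  /* n2: single-step the thread at COPY and wait for it to stop,
     e.g. via target_resume (ptid, 1, TARGET_SIGNAL_0) and target_wait.  */

  gdbarch_displaced_step_fixup (gdbarch, closure,                 /* n3 */
                                original, copy, regcache);
  gdbarch_displaced_step_free_closure (gdbarch, closure);
  /* n4: resume the thread normally.  */
}
#endif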
982 struct displaced_step_request
983 {
984 ptid_t ptid;
985 struct displaced_step_request *next;
986 };
987
988 /* Per-inferior displaced stepping state. */
989 struct displaced_step_inferior_state
990 {
991 /* Pointer to next in linked list. */
992 struct displaced_step_inferior_state *next;
993
994 /* The process this displaced step state refers to. */
995 int pid;
996
997 /* A queue of pending displaced stepping requests. One entry per
998 thread that needs to do a displaced step. */
999 struct displaced_step_request *step_request_queue;
1000
1001 /* If this is not null_ptid, this is the thread carrying out a
1002 displaced single-step in process PID. This thread's state will
1003 require fixing up once it has completed its step. */
1004 ptid_t step_ptid;
1005
1006 /* The architecture the thread had when we stepped it. */
1007 struct gdbarch *step_gdbarch;
1008
1009 /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
1010 for post-step cleanup. */
1011 struct displaced_step_closure *step_closure;
1012
1013 /* The address of the original instruction, and the copy we
1014 made. */
1015 CORE_ADDR step_original, step_copy;
1016
1017 /* Saved contents of copy area. */
1018 gdb_byte *step_saved_copy;
1019 };
1020
1021 /* The list of states of processes involved in displaced stepping
1022 presently. */
1023 static struct displaced_step_inferior_state *displaced_step_inferior_states;
1024
1025 /* Get the displaced stepping state of process PID. */
1026
1027 static struct displaced_step_inferior_state *
1028 get_displaced_stepping_state (int pid)
1029 {
1030 struct displaced_step_inferior_state *state;
1031
1032 for (state = displaced_step_inferior_states;
1033 state != NULL;
1034 state = state->next)
1035 if (state->pid == pid)
1036 return state;
1037
1038 return NULL;
1039 }
1040
1041 /* Add a new displaced stepping state for process PID to the displaced
1042 stepping state list, or return a pointer to an already existing
1043 entry, if it already exists. Never returns NULL. */
1044
1045 static struct displaced_step_inferior_state *
1046 add_displaced_stepping_state (int pid)
1047 {
1048 struct displaced_step_inferior_state *state;
1049
1050 for (state = displaced_step_inferior_states;
1051 state != NULL;
1052 state = state->next)
1053 if (state->pid == pid)
1054 return state;
1055
1056 state = xcalloc (1, sizeof (*state));
1057 state->pid = pid;
1058 state->next = displaced_step_inferior_states;
1059 displaced_step_inferior_states = state;
1060
1061 return state;
1062 }
1063
1064 /* Remove the displaced stepping state of process PID. */
1065
1066 static void
1067 remove_displaced_stepping_state (int pid)
1068 {
1069 struct displaced_step_inferior_state *it, **prev_next_p;
1070
1071 gdb_assert (pid != 0);
1072
1073 it = displaced_step_inferior_states;
1074 prev_next_p = &displaced_step_inferior_states;
1075 while (it)
1076 {
1077 if (it->pid == pid)
1078 {
1079 *prev_next_p = it->next;
1080 xfree (it);
1081 return;
1082 }
1083
1084 prev_next_p = &it->next;
1085 it = *prev_next_p;
1086 }
1087 }
1088
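/* Forget any displaced stepping state of INF's process when the
   inferior goes away.  */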
1089 static void
1090 infrun_inferior_exit (struct inferior *inf)
1091 {
1092 remove_displaced_stepping_state (inf->pid);
1093 }
1094
1095 /* Enum strings for "set|show displaced-stepping". */
1096
1097 static const char can_use_displaced_stepping_auto[] = "auto";
1098 static const char can_use_displaced_stepping_on[] = "on";
1099 static const char can_use_displaced_stepping_off[] = "off";
1100 static const char *can_use_displaced_stepping_enum[] =
1101 {
1102 can_use_displaced_stepping_auto,
1103 can_use_displaced_stepping_on,
1104 can_use_displaced_stepping_off,
1105 NULL,
1106 };
1107
1108 /* If ON, and the architecture supports it, GDB will use displaced
1109 stepping to step over breakpoints. If OFF, or if the architecture
1110 doesn't support it, GDB will instead use the traditional
1111 hold-and-step approach. If AUTO (which is the default), GDB will
1112 decide which technique to use to step over breakpoints depending on
1113 which of all-stop or non-stop mode is active --- displaced stepping
1114 in non-stop mode; hold-and-step in all-stop mode. */
1115
1116 static const char *can_use_displaced_stepping =
1117 can_use_displaced_stepping_auto;
1118
1119 static void
1120 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1121 struct cmd_list_element *c,
1122 const char *value)
1123 {
1124 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
1125 fprintf_filtered (file, _("\
1126 Debugger's willingness to use displaced stepping to step over \
1127 breakpoints is %s (currently %s).\n"),
1128 value, non_stop ? "on" : "off");
1129 else
1130 fprintf_filtered (file, _("\
1131 Debugger's willingness to use displaced stepping to step over \
1132 breakpoints is %s.\n"), value);
1133 }
1134
1135 /* Return non-zero if displaced stepping can/should be used to step
1136 over breakpoints. */
1137
1138 static int
1139 use_displaced_stepping (struct gdbarch *gdbarch)
1140 {
1141 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
1142 && non_stop)
1143 || can_use_displaced_stepping == can_use_displaced_stepping_on)
1144 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1145 && !RECORD_IS_USED);
1146 }
1147
1148 /* Clean out any stray displaced stepping state. */
1149 static void
1150 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1151 {
1152 /* Indicate that there is no cleanup pending. */
1153 displaced->step_ptid = null_ptid;
1154
1155 if (displaced->step_closure)
1156 {
1157 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1158 displaced->step_closure);
1159 displaced->step_closure = NULL;
1160 }
1161 }
1162
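/* Cleanup wrapper around displaced_step_clear, suitable for use with
   make_cleanup.  */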
1163 static void
1164 displaced_step_clear_cleanup (void *arg)
1165 {
1166 struct displaced_step_inferior_state *state = arg;
1167
1168 displaced_step_clear (state);
1169 }
1170
1171 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1172 void
1173 displaced_step_dump_bytes (struct ui_file *file,
1174 const gdb_byte *buf,
1175 size_t len)
1176 {
1177 int i;
1178
1179 for (i = 0; i < len; i++)
1180 fprintf_unfiltered (file, "%02x ", buf[i]);
1181 fputs_unfiltered ("\n", file);
1182 }
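/* For example, dumping the four bytes { 0x55, 0x48, 0x89, 0xe5 }
   prints "55 48 89 e5 " followed by a newline (editor's illustration).  */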
1183
1184 /* Prepare to single-step, using displaced stepping.
1185
1186 Note that we cannot use displaced stepping when we have a signal to
1187 deliver. If we have a signal to deliver and an instruction to step
1188 over, then after the step, there will be no indication from the
1189 target whether the thread entered a signal handler or ignored the
1190 signal and stepped over the instruction successfully --- both cases
1191 result in a simple SIGTRAP. In the first case we mustn't do a
1192 fixup, and in the second case we must --- but we can't tell which.
1193 Comments in the code for 'random signals' in handle_inferior_event
1194 explain how we handle this case instead.
1195
1196 Returns 1 if preparing was successful -- this thread is going to be
1197 stepped now; or 0 if displaced stepping this thread got queued. */
1198 static int
1199 displaced_step_prepare (ptid_t ptid)
1200 {
1201 struct cleanup *old_cleanups, *ignore_cleanups;
1202 struct regcache *regcache = get_thread_regcache (ptid);
1203 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1204 CORE_ADDR original, copy;
1205 ULONGEST len;
1206 struct displaced_step_closure *closure;
1207 struct displaced_step_inferior_state *displaced;
1208
1209 /* We should never reach this function if the architecture does not
1210 support displaced stepping. */
1211 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
1212
1213 /* We have to displaced step one thread at a time, as we only have
1214 access to a single scratch space per inferior. */
1215
1216 displaced = add_displaced_stepping_state (ptid_get_pid (ptid));
1217
1218 if (!ptid_equal (displaced->step_ptid, null_ptid))
1219 {
1220 /* Already waiting for a displaced step to finish. Defer this
1221 request and place in queue. */
1222 struct displaced_step_request *req, *new_req;
1223
1224 if (debug_displaced)
1225 fprintf_unfiltered (gdb_stdlog,
1226 "displaced: defering step of %s\n",
1227 target_pid_to_str (ptid));
1228
1229 new_req = xmalloc (sizeof (*new_req));
1230 new_req->ptid = ptid;
1231 new_req->next = NULL;
1232
1233 if (displaced->step_request_queue)
1234 {
1235 for (req = displaced->step_request_queue;
1236 req && req->next;
1237 req = req->next)
1238 ;
1239 req->next = new_req;
1240 }
1241 else
1242 displaced->step_request_queue = new_req;
1243
1244 return 0;
1245 }
1246 else
1247 {
1248 if (debug_displaced)
1249 fprintf_unfiltered (gdb_stdlog,
1250 "displaced: stepping %s now\n",
1251 target_pid_to_str (ptid));
1252 }
1253
1254 displaced_step_clear (displaced);
1255
1256 old_cleanups = save_inferior_ptid ();
1257 inferior_ptid = ptid;
1258
1259 original = regcache_read_pc (regcache);
1260
1261 copy = gdbarch_displaced_step_location (gdbarch);
1262 len = gdbarch_max_insn_length (gdbarch);
1263
1264 /* Save the original contents of the copy area. */
1265 displaced->step_saved_copy = xmalloc (len);
1266 ignore_cleanups = make_cleanup (free_current_contents,
1267 &displaced->step_saved_copy);
1268 read_memory (copy, displaced->step_saved_copy, len);
1269 if (debug_displaced)
1270 {
1271 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
1272 paddress (gdbarch, copy));
1273 displaced_step_dump_bytes (gdb_stdlog,
1274 displaced->step_saved_copy,
1275 len);
1276 }
1277
1278 closure = gdbarch_displaced_step_copy_insn (gdbarch,
1279 original, copy, regcache);
1280
1281 /* We don't support the fully-simulated case at present. */
1282 gdb_assert (closure);
1283
1284 /* Save the information we need to fix things up if the step
1285 succeeds. */
1286 displaced->step_ptid = ptid;
1287 displaced->step_gdbarch = gdbarch;
1288 displaced->step_closure = closure;
1289 displaced->step_original = original;
1290 displaced->step_copy = copy;
1291
1292 make_cleanup (displaced_step_clear_cleanup, displaced);
1293
1294 /* Resume execution at the copy. */
1295 regcache_write_pc (regcache, copy);
1296
1297 discard_cleanups (ignore_cleanups);
1298
1299 do_cleanups (old_cleanups);
1300
1301 if (debug_displaced)
1302 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
1303 paddress (gdbarch, copy));
1304
1305 return 1;
1306 }
1307
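/* Write LEN bytes starting at MYADDR to target memory at MEMADDR, on
   behalf of thread PTID: temporarily switch inferior_ptid to PTID for
   the duration of the write.  */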
1308 static void
1309 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1310 {
1311 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1312
1313 inferior_ptid = ptid;
1314 write_memory (memaddr, myaddr, len);
1315 do_cleanups (ptid_cleanup);
1316 }
1317
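/* Fix up the state left by a displaced step.  If EVENT_PTID is the
   thread that was displaced stepping in its process and it stopped
   with SIGNAL, restore the scratch area, apply the architecture's
   fixup (or, if the step did not complete, just relocate the PC back
   into the original code), and then start the next queued
   displaced-step request, if any.  */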
1318 static void
1319 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
1320 {
1321 struct cleanup *old_cleanups;
1322 struct displaced_step_inferior_state *displaced
1323 = get_displaced_stepping_state (ptid_get_pid (event_ptid));
1324
1325 /* Was any thread of this process doing a displaced step? */
1326 if (displaced == NULL)
1327 return;
1328
1329 /* Was this event for the thread we displaced? */
1330 if (ptid_equal (displaced->step_ptid, null_ptid)
1331 || ! ptid_equal (displaced->step_ptid, event_ptid))
1332 return;
1333
1334 old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);
1335
1336 /* Restore the contents of the copy area. */
1337 {
1338 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1339
1340 write_memory_ptid (displaced->step_ptid, displaced->step_copy,
1341 displaced->step_saved_copy, len);
1342 if (debug_displaced)
1343 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
1344 paddress (displaced->step_gdbarch,
1345 displaced->step_copy));
1346 }
1347
1348 /* Did the instruction complete successfully? */
1349 if (signal == TARGET_SIGNAL_TRAP)
1350 {
1351 /* Fix up the resulting state. */
1352 gdbarch_displaced_step_fixup (displaced->step_gdbarch,
1353 displaced->step_closure,
1354 displaced->step_original,
1355 displaced->step_copy,
1356 get_thread_regcache (displaced->step_ptid));
1357 }
1358 else
1359 {
1360 /* Since the instruction didn't complete, all we can do is
1361 relocate the PC. */
1362 struct regcache *regcache = get_thread_regcache (event_ptid);
1363 CORE_ADDR pc = regcache_read_pc (regcache);
1364
1365 pc = displaced->step_original + (pc - displaced->step_copy);
1366 regcache_write_pc (regcache, pc);
1367 }
1368
1369 do_cleanups (old_cleanups);
1370
1371 displaced->step_ptid = null_ptid;
1372
1373 /* Are there any pending displaced stepping requests? If so, run
1374 one now. Leave the state object around, since we're likely to
1375 need it again soon. */
1376 while (displaced->step_request_queue)
1377 {
1378 struct displaced_step_request *head;
1379 ptid_t ptid;
1380 struct regcache *regcache;
1381 struct gdbarch *gdbarch;
1382 CORE_ADDR actual_pc;
1383 struct address_space *aspace;
1384
1385 head = displaced->step_request_queue;
1386 ptid = head->ptid;
1387 displaced->step_request_queue = head->next;
1388 xfree (head);
1389
1390 context_switch (ptid);
1391
1392 regcache = get_thread_regcache (ptid);
1393 actual_pc = regcache_read_pc (regcache);
1394 aspace = get_regcache_aspace (regcache);
1395
1396 if (breakpoint_here_p (aspace, actual_pc))
1397 {
1398 if (debug_displaced)
1399 fprintf_unfiltered (gdb_stdlog,
1400 "displaced: stepping queued %s now\n",
1401 target_pid_to_str (ptid));
1402
1403 displaced_step_prepare (ptid);
1404
1405 gdbarch = get_regcache_arch (regcache);
1406
1407 if (debug_displaced)
1408 {
1409 CORE_ADDR actual_pc = regcache_read_pc (regcache);
1410 gdb_byte buf[4];
1411
1412 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1413 paddress (gdbarch, actual_pc));
1414 read_memory (actual_pc, buf, sizeof (buf));
1415 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1416 }
1417
1418 if (gdbarch_displaced_step_hw_singlestep (gdbarch,
1419 displaced->step_closure))
1420 target_resume (ptid, 1, TARGET_SIGNAL_0);
1421 else
1422 target_resume (ptid, 0, TARGET_SIGNAL_0);
1423
1424 /* Done, we're stepping a thread. */
1425 break;
1426 }
1427 else
1428 {
1429 int step;
1430 struct thread_info *tp = inferior_thread ();
1431
1432 /* The breakpoint we were sitting under has since been
1433 removed. */
1434 tp->trap_expected = 0;
1435
1436 /* Go back to what we were trying to do. */
1437 step = currently_stepping (tp);
1438
1439 if (debug_displaced)
1440 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1441 target_pid_to_str (tp->ptid), step);
1442
1443 target_resume (ptid, step, TARGET_SIGNAL_0);
1444 tp->stop_signal = TARGET_SIGNAL_0;
1445
1446 /* This request was discarded. See if there's any other
1447 thread waiting for its turn. */
1448 }
1449 }
1450 }
1451
1452 /* Update global variables holding ptids to hold NEW_PTID if they were
1453 holding OLD_PTID. */
1454 static void
1455 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1456 {
1457 struct displaced_step_request *it;
1458 struct displaced_step_inferior_state *displaced;
1459
1460 if (ptid_equal (inferior_ptid, old_ptid))
1461 inferior_ptid = new_ptid;
1462
1463 if (ptid_equal (singlestep_ptid, old_ptid))
1464 singlestep_ptid = new_ptid;
1465
1466 if (ptid_equal (deferred_step_ptid, old_ptid))
1467 deferred_step_ptid = new_ptid;
1468
1469 for (displaced = displaced_step_inferior_states;
1470 displaced;
1471 displaced = displaced->next)
1472 {
1473 if (ptid_equal (displaced->step_ptid, old_ptid))
1474 displaced->step_ptid = new_ptid;
1475
1476 for (it = displaced->step_request_queue; it; it = it->next)
1477 if (ptid_equal (it->ptid, old_ptid))
1478 it->ptid = new_ptid;
1479 }
1480 }
1481
1482 \f
1483 /* Resuming. */
1484
1485 /* Things to clean up if we QUIT out of resume (). */
1486 static void
1487 resume_cleanups (void *ignore)
1488 {
1489 normal_stop ();
1490 }
1491
1492 static const char schedlock_off[] = "off";
1493 static const char schedlock_on[] = "on";
1494 static const char schedlock_step[] = "step";
1495 static const char *scheduler_enums[] = {
1496 schedlock_off,
1497 schedlock_on,
1498 schedlock_step,
1499 NULL
1500 };
1501 static const char *scheduler_mode = schedlock_off;
1502 static void
1503 show_scheduler_mode (struct ui_file *file, int from_tty,
1504 struct cmd_list_element *c, const char *value)
1505 {
1506 fprintf_filtered (file, _("\
1507 Mode for locking scheduler during execution is \"%s\".\n"),
1508 value);
1509 }
1510
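/* Validation hook for "set scheduler-locking": revert to "off" and
   error out if the target cannot lock the scheduler.  */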
1511 static void
1512 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1513 {
1514 if (!target_can_lock_scheduler)
1515 {
1516 scheduler_mode = schedlock_off;
1517 error (_("Target '%s' cannot support this command."), target_shortname);
1518 }
1519 }
1520
1521 /* True if execution commands resume all threads of all processes by
1522 default; otherwise, resume only threads of the current inferior
1523 process. */
1524 int sched_multi = 0;
1525
1526 /* Try to set up for software single stepping over the specified location.
1527 Return 1 if target_resume() should use hardware single step.
1528
1529 GDBARCH the current gdbarch.
1530 PC the location to step over. */
1531
1532 static int
1533 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1534 {
1535 int hw_step = 1;
1536
1537 if (execution_direction == EXEC_FORWARD
1538 && gdbarch_software_single_step_p (gdbarch)
1539 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1540 {
1541 hw_step = 0;
1542 /* Do not pull these breakpoints until after a `wait' in
1543 `wait_for_inferior' */
1544 singlestep_breakpoints_inserted_p = 1;
1545 singlestep_ptid = inferior_ptid;
1546 singlestep_pc = pc;
1547 }
1548 return hw_step;
1549 }
1550
1551 /* Resume the inferior, but allow a QUIT. This is useful if the user
1552 wants to interrupt some lengthy single-stepping operation
1553 (for child processes, the SIGINT goes to the inferior, and so
1554 we get a SIGINT random_signal, but for remote debugging and perhaps
1555 other targets, that's not true).
1556
1557 STEP nonzero if we should step (zero to continue instead).
1558 SIG is the signal to give the inferior (zero for none). */
1559 void
1560 resume (int step, enum target_signal sig)
1561 {
1562 int should_resume = 1;
1563 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1564 struct regcache *regcache = get_current_regcache ();
1565 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1566 struct thread_info *tp = inferior_thread ();
1567 CORE_ADDR pc = regcache_read_pc (regcache);
1568 struct address_space *aspace = get_regcache_aspace (regcache);
1569
1570 QUIT;
1571
1572 if (current_inferior ()->waiting_for_vfork_done)
1573 {
1574 /* Don't try to single-step a vfork parent that is waiting for
1575 the child to get out of the shared memory region (by exec'ing
1576 or exiting). This is particularly important on software
1577 single-step archs, as the child process would trip on the
1578 software single step breakpoint inserted for the parent
1579 process. Since the parent will not actually execute any
1580 instruction until the child is out of the shared region (such
1581 are vfork's semantics), it is safe to simply continue it.
1582 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
1583 the parent, and tell it to `keep_going', which automatically
1584 re-sets it stepping. */
1585 if (debug_infrun)
1586 fprintf_unfiltered (gdb_stdlog,
1587 "infrun: resume : clear step\n");
1588 step = 0;
1589 }
1590
1591 if (debug_infrun)
1592 fprintf_unfiltered (gdb_stdlog,
1593 "infrun: resume (step=%d, signal=%d), "
1594 "trap_expected=%d\n",
1595 step, sig, tp->trap_expected);
1596
1597 /* Normally, by the time we reach `resume', the breakpoints are either
1598 removed or inserted, as appropriate. The exception is if we're sitting
1599 at a permanent breakpoint; we need to step over it, but permanent
1600 breakpoints can't be removed. So we have to test for it here. */
1601 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1602 {
1603 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1604 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1605 else
1606 error (_("\
1607 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1608 how to step past a permanent breakpoint on this architecture. Try using\n\
1609 a command like `return' or `jump' to continue execution."));
1610 }
1611
1612 /* If enabled, step over breakpoints by executing a copy of the
1613 instruction at a different address.
1614
1615 We can't use displaced stepping when we have a signal to deliver;
1616 the comments for displaced_step_prepare explain why. The
1617 comments in handle_inferior_event for dealing with 'random
1618 signals' explain what we do instead.
1619
1620 We can't use displaced stepping when we are waiting for the vfork_done
1621 event; displaced stepping breaks the vfork child in the same way a
1622 software single-step breakpoint would. */
1623 if (use_displaced_stepping (gdbarch)
1624 && (tp->trap_expected
1625 || (step && gdbarch_software_single_step_p (gdbarch)))
1626 && sig == TARGET_SIGNAL_0
1627 && !current_inferior ()->waiting_for_vfork_done)
1628 {
1629 struct displaced_step_inferior_state *displaced;
1630
1631 if (!displaced_step_prepare (inferior_ptid))
1632 {
1633 /* Got placed in displaced stepping queue. Will be resumed
1634 later when all the currently queued displaced stepping
1635 requests finish. The thread is not executing at this point,
1636 and the call to set_executing will be made later. But we
1637 need to call set_running here, since from the frontend's point of view,
1638 the thread is running. */
1639 set_running (inferior_ptid, 1);
1640 discard_cleanups (old_cleanups);
1641 return;
1642 }
1643
1644 displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1645 step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1646 displaced->step_closure);
1647 }
1648
1649 /* Do we need to do it the hard way, w/temp breakpoints? */
1650 else if (step)
1651 step = maybe_software_singlestep (gdbarch, pc);
1652
1653 if (should_resume)
1654 {
1655 ptid_t resume_ptid;
1656
1657 /* If STEP is set, it's a request to use hardware stepping
1658 facilities. But in that case, we should never
1659 use singlestep breakpoint. */
1660 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1661
1662 /* Decide the set of threads to ask the target to resume. Start
1663 by assuming everything will be resumed, then narrow the set
1664 by applying increasingly restricting conditions. */
1665
1666 /* By default, resume all threads of all processes. */
1667 resume_ptid = RESUME_ALL;
1668
1669 /* Maybe resume only all threads of the current process. */
1670 if (!sched_multi && target_supports_multi_process ())
1671 {
1672 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1673 }
1674
1675 /* Maybe resume a single thread after all. */
1676 if (singlestep_breakpoints_inserted_p
1677 && stepping_past_singlestep_breakpoint)
1678 {
1679 /* The situation here is as follows.  In thread T1 we wanted to
1680 single-step.  Lacking hardware single-stepping, we've set a
1681 breakpoint at the PC of the next instruction -- call it
1682 P.  After resuming, we've hit that breakpoint in thread T2.
1683 Now we've removed the original breakpoint, inserted a breakpoint
1684 at P+1, and are trying to step to advance T2 past the breakpoint.
1685 We need to step only T2; if T1 is allowed to run freely,
1686 it can run past P, and if other threads are allowed to run,
1687 they can hit the breakpoint at P+1, and nested hits of single-step
1688 breakpoints are not something we'd want -- that's complicated
1689 to support, and has no value. */
1690 resume_ptid = inferior_ptid;
1691 }
1692 else if ((step || singlestep_breakpoints_inserted_p)
1693 && tp->trap_expected)
1694 {
1695 /* We're allowing a thread to run past a breakpoint it has
1696 hit, by single-stepping the thread with the breakpoint
1697 removed.  In that case, we need to single-step only this
1698 thread and keep the others stopped, as they could miss this
1699 breakpoint if allowed to run.
1700
1701 The current code actually removes all breakpoints when
1702 doing this, not just the one being stepped over, so if we
1703 let other threads run, we can actually miss any
1704 breakpoint, not just the one at PC. */
1705 resume_ptid = inferior_ptid;
1706 }
1707 else if (non_stop)
1708 {
1709 /* With non-stop mode on, threads are always handled
1710 individually. */
1711 resume_ptid = inferior_ptid;
1712 }
1713 else if ((scheduler_mode == schedlock_on)
1714 || (scheduler_mode == schedlock_step
1715 && (step || singlestep_breakpoints_inserted_p)))
1716 {
1717 /* User-settable 'scheduler' mode requires solo thread resume. */
1718 resume_ptid = inferior_ptid;
1719 }
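 /* The narrowing above can be condensed into a single decision.  The
    sketch below is illustrative only: the helper name and the
    resume_inputs structure are hypothetical (not part of GDB), and the
    integer "scope" stands in for RESUME_ALL / pid_to_ptid (...) /
    inferior_ptid.  It is written as a standalone program and excluded
    from the build with #if 0.  */
#if 0
#include <stdio.h>

enum sched_mode { SCHEDLOCK_OFF, SCHEDLOCK_ON, SCHEDLOCK_STEP };

struct resume_inputs
{
  int sched_multi;             /* "set schedule-multiple on"?  */
  int supports_multi_process;  /* target_supports_multi_process ()  */
  int stepping_past_sss_bp;    /* stepping_past_singlestep_breakpoint  */
  int sss_bp_inserted;         /* singlestep_breakpoints_inserted_p  */
  int step;                    /* hardware-step request  */
  int trap_expected;           /* tp->trap_expected  */
  int non_stop;
  enum sched_mode scheduler_mode;
};

/* Return 0 for "resume all threads of all processes", 1 for "resume
   only the current process", 2 for "resume only the current thread".  */
static int
choose_resume_scope (const struct resume_inputs *in)
{
  int scope = 0;

  if (!in->sched_multi && in->supports_multi_process)
    scope = 1;

  if ((in->sss_bp_inserted && in->stepping_past_sss_bp)
      || ((in->step || in->sss_bp_inserted) && in->trap_expected)
      || in->non_stop
      || in->scheduler_mode == SCHEDLOCK_ON
      || (in->scheduler_mode == SCHEDLOCK_STEP
          && (in->step || in->sss_bp_inserted)))
    scope = 2;

  return scope;
}

int
main (void)
{
  /* Stepping past a breakpoint (trap_expected set): only the current
     thread may run, so the scope is 2.  */
  struct resume_inputs in = { 0, 1, 0, 0, 1, 1, 0, SCHEDLOCK_OFF };

  printf ("scope = %d\n", choose_resume_scope (&in));
  return 0;
}
#endif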
1720
1721 if (gdbarch_cannot_step_breakpoint (gdbarch))
1722 {
1723 /* Most targets can step a breakpoint instruction, thus
1724 executing it normally. But if this one cannot, just
1725 continue and we will hit it anyway. */
1726 if (step && breakpoint_inserted_here_p (aspace, pc))
1727 step = 0;
1728 }
1729
1730 if (debug_displaced
1731 && use_displaced_stepping (gdbarch)
1732 && tp->trap_expected)
1733 {
1734 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1735 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1736 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1737 gdb_byte buf[4];
1738
1739 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1740 paddress (resume_gdbarch, actual_pc));
1741 read_memory (actual_pc, buf, sizeof (buf));
1742 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1743 }
1744
1745 /* Install inferior's terminal modes. */
1746 target_terminal_inferior ();
1747
1748 /* Avoid confusing the next resume, if the next stop/resume
1749 happens to apply to another thread. */
1750 tp->stop_signal = TARGET_SIGNAL_0;
1751
1752 target_resume (resume_ptid, step, sig);
1753 }
1754
1755 discard_cleanups (old_cleanups);
1756 }
1757 \f
1758 /* Proceeding. */
1759
1760 /* Clear out all variables saying what to do when the inferior is continued.
1761 First do this, then set the ones you want, then call `proceed'.  */
1762
1763 static void
1764 clear_proceed_status_thread (struct thread_info *tp)
1765 {
1766 if (debug_infrun)
1767 fprintf_unfiltered (gdb_stdlog,
1768 "infrun: clear_proceed_status_thread (%s)\n",
1769 target_pid_to_str (tp->ptid));
1770
1771 tp->trap_expected = 0;
1772 tp->step_range_start = 0;
1773 tp->step_range_end = 0;
1774 tp->step_frame_id = null_frame_id;
1775 tp->step_stack_frame_id = null_frame_id;
1776 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1777 tp->stop_requested = 0;
1778
1779 tp->stop_step = 0;
1780
1781 tp->proceed_to_finish = 0;
1782
1783 /* Discard any remaining commands or status from previous stop. */
1784 bpstat_clear (&tp->stop_bpstat);
1785 }
1786
1787 static int
1788 clear_proceed_status_callback (struct thread_info *tp, void *data)
1789 {
1790 if (is_exited (tp->ptid))
1791 return 0;
1792
1793 clear_proceed_status_thread (tp);
1794 return 0;
1795 }
1796
1797 void
1798 clear_proceed_status (void)
1799 {
1800 if (!non_stop)
1801 {
1802 /* In all-stop mode, delete the per-thread status of all
1803 threads; even if inferior_ptid is null_ptid, there may be
1804 threads on the list. E.g., we may be launching a new
1805 process, while selecting the executable. */
1806 iterate_over_threads (clear_proceed_status_callback, NULL);
1807 }
1808
1809 if (!ptid_equal (inferior_ptid, null_ptid))
1810 {
1811 struct inferior *inferior;
1812
1813 if (non_stop)
1814 {
1815 /* If in non-stop mode, only delete the per-thread status of
1816 the current thread. */
1817 clear_proceed_status_thread (inferior_thread ());
1818 }
1819
1820 inferior = current_inferior ();
1821 inferior->stop_soon = NO_STOP_QUIETLY;
1822 }
1823
1824 stop_after_trap = 0;
1825
1826 observer_notify_about_to_proceed ();
1827
1828 if (stop_registers)
1829 {
1830 regcache_xfree (stop_registers);
1831 stop_registers = NULL;
1832 }
1833 }
1834
1835 /* Check the current thread against the thread that reported the most recent
1836 event.  If a step-over is required, return TRUE and set the current thread
1837 to the old thread. Otherwise return FALSE.
1838
1839 This should be suitable for any targets that support threads. */
1840
1841 static int
1842 prepare_to_proceed (int step)
1843 {
1844 ptid_t wait_ptid;
1845 struct target_waitstatus wait_status;
1846 int schedlock_enabled;
1847
1848 /* With non-stop mode on, threads are always handled individually. */
1849 gdb_assert (! non_stop);
1850
1851 /* Get the last target status returned by target_wait(). */
1852 get_last_target_status (&wait_ptid, &wait_status);
1853
1854 /* Make sure we were stopped at a breakpoint. */
1855 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1856 || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1857 && wait_status.value.sig != TARGET_SIGNAL_ILL
1858 && wait_status.value.sig != TARGET_SIGNAL_SEGV
1859 && wait_status.value.sig != TARGET_SIGNAL_EMT))
1860 {
1861 return 0;
1862 }
1863
1864 schedlock_enabled = (scheduler_mode == schedlock_on
1865 || (scheduler_mode == schedlock_step
1866 && step));
1867
1868 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1869 if (schedlock_enabled)
1870 return 0;
1871
1872 /* Don't switch over if we're about to resume some process other
1873 than WAIT_PTID's, and schedule-multiple is off.  */
1874 if (!sched_multi
1875 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1876 return 0;
1877
1878 /* Switched over from WAIT_PTID.  */
1879 if (!ptid_equal (wait_ptid, minus_one_ptid)
1880 && !ptid_equal (inferior_ptid, wait_ptid))
1881 {
1882 struct regcache *regcache = get_thread_regcache (wait_ptid);
1883
1884 if (breakpoint_here_p (get_regcache_aspace (regcache),
1885 regcache_read_pc (regcache)))
1886 {
1887 /* If stepping, remember current thread to switch back to. */
1888 if (step)
1889 deferred_step_ptid = inferior_ptid;
1890
1891 /* Switch back to the WAIT_PTID thread.  */
1892 switch_to_thread (wait_ptid);
1893
1894 /* We return 1 to indicate that there is a breakpoint here,
1895 so we need to step over it before continuing to avoid
1896 hitting it straight away. */
1897 return 1;
1898 }
1899 }
1900
1901 return 0;
1902 }
1903
1904 /* Basic routine for continuing the program in various fashions.
1905
1906 ADDR is the address to resume at, or -1 for resume where stopped.
1907 SIGGNAL is the signal to give it, or 0 for none,
1908 or -1 to act according to how it stopped.
1909 STEP is nonzero if we should trap after one instruction.
1910 -1 means return after that and print nothing.
1911 You should probably set various step_... variables
1912 before calling here, if you are stepping.
1913
1914 You should call clear_proceed_status before calling proceed. */
1915
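 /* A minimal sketch of the calling protocol described above, roughly
    how a continue-style command would drive it.  The function name is
    hypothetical and the surrounding command machinery is omitted; it is
    excluded from the build with #if 0.  */
#if 0
static void
example_resume_where_stopped (void)
{
  /* 1. Reset all per-thread "what to do next" state.  */
  clear_proceed_status ();

  /* 2. Set whichever step_... variables the command needs here
     (none for a plain continue).  */

  /* 3. Resume.  (CORE_ADDR) -1 means "resume where stopped",
     TARGET_SIGNAL_DEFAULT means "act according to how it stopped",
     and STEP == 0 means "do not trap after one instruction".  */
  proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
}
#endif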
1916 void
1917 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1918 {
1919 struct regcache *regcache;
1920 struct gdbarch *gdbarch;
1921 struct thread_info *tp;
1922 CORE_ADDR pc;
1923 struct address_space *aspace;
1924 int oneproc = 0;
1925
1926 /* If we're stopped at a fork/vfork, follow the branch set by the
1927 "set follow-fork-mode" command; otherwise, we'll just proceed
1928 resuming the current thread. */
1929 if (!follow_fork ())
1930 {
1931 /* The target for some reason decided not to resume. */
1932 normal_stop ();
1933 return;
1934 }
1935
1936 regcache = get_current_regcache ();
1937 gdbarch = get_regcache_arch (regcache);
1938 aspace = get_regcache_aspace (regcache);
1939 pc = regcache_read_pc (regcache);
1940
1941 if (step > 0)
1942 step_start_function = find_pc_function (pc);
1943 if (step < 0)
1944 stop_after_trap = 1;
1945
1946 if (addr == (CORE_ADDR) -1)
1947 {
1948 if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1949 && execution_direction != EXEC_REVERSE)
1950 /* There is a breakpoint at the address we will resume at,
1951 step one instruction before inserting breakpoints so that
1952 we do not stop right away (and report a second hit at this
1953 breakpoint).
1954
1955 Note, we don't do this in reverse, because we won't
1956 actually be executing the breakpoint insn anyway.
1957 We'll be (un-)executing the previous instruction. */
1958
1959 oneproc = 1;
1960 else if (gdbarch_single_step_through_delay_p (gdbarch)
1961 && gdbarch_single_step_through_delay (gdbarch,
1962 get_current_frame ()))
1963 /* We stepped onto an instruction that needs to be stepped
1964 again before re-inserting the breakpoint, do so. */
1965 oneproc = 1;
1966 }
1967 else
1968 {
1969 regcache_write_pc (regcache, addr);
1970 }
1971
1972 if (debug_infrun)
1973 fprintf_unfiltered (gdb_stdlog,
1974 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1975 paddress (gdbarch, addr), siggnal, step);
1976
1977 /* We're handling a live event, so make sure we're doing live
1978 debugging. If we're looking at traceframes while the target is
1979 running, we're going to need to get back to that mode after
1980 handling the event. */
1981 if (non_stop)
1982 {
1983 make_cleanup_restore_current_traceframe ();
1984 set_traceframe_number (-1);
1985 }
1986
1987 if (non_stop)
1988 /* In non-stop, each thread is handled individually. The context
1989 must already be set to the right thread here. */
1990 ;
1991 else
1992 {
1993 /* In a multi-threaded task we may select another thread and
1994 then continue or step.
1995
1996 But if the old thread was stopped at a breakpoint, it will
1997 immediately cause another breakpoint stop without any
1998 execution (i.e. it will report a breakpoint hit incorrectly).
1999 So we must step over it first.
2000
2001 prepare_to_proceed checks the current thread against the
2002 thread that reported the most recent event. If a step-over
2003 is required it returns TRUE and sets the current thread to
2004 the old thread. */
2005 if (prepare_to_proceed (step))
2006 oneproc = 1;
2007 }
2008
2009 /* prepare_to_proceed may change the current thread. */
2010 tp = inferior_thread ();
2011
2012 if (oneproc)
2013 {
2014 tp->trap_expected = 1;
2015 /* If displaced stepping is enabled, we can step over the
2016 breakpoint without hitting it, so leave all breakpoints
2017 inserted. Otherwise we need to disable all breakpoints, step
2018 one instruction, and then re-add them when that step is
2019 finished. */
2020 if (!use_displaced_stepping (gdbarch))
2021 remove_breakpoints ();
2022 }
2023
2024 /* We can insert breakpoints if we're not trying to step over one,
2025 or if we are stepping over one but we're using displaced stepping
2026 to do so. */
2027 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
2028 insert_breakpoints ();
2029
2030 if (!non_stop)
2031 {
2032 /* Pass the last stop signal to the thread we're resuming,
2033 irrespective of whether the current thread is the thread that
2034 got the last event or not. This was historically GDB's
2035 behaviour before keeping a stop_signal per thread. */
2036
2037 struct thread_info *last_thread;
2038 ptid_t last_ptid;
2039 struct target_waitstatus last_status;
2040
2041 get_last_target_status (&last_ptid, &last_status);
2042 if (!ptid_equal (inferior_ptid, last_ptid)
2043 && !ptid_equal (last_ptid, null_ptid)
2044 && !ptid_equal (last_ptid, minus_one_ptid))
2045 {
2046 last_thread = find_thread_ptid (last_ptid);
2047 if (last_thread)
2048 {
2049 tp->stop_signal = last_thread->stop_signal;
2050 last_thread->stop_signal = TARGET_SIGNAL_0;
2051 }
2052 }
2053 }
2054
2055 if (siggnal != TARGET_SIGNAL_DEFAULT)
2056 tp->stop_signal = siggnal;
2057 /* If this signal should not be seen by the program,
2058 give it zero.  Used for debugging signals. */
2059 else if (!signal_program[tp->stop_signal])
2060 tp->stop_signal = TARGET_SIGNAL_0;
2061
2062 annotate_starting ();
2063
2064 /* Make sure that output from GDB appears before output from the
2065 inferior. */
2066 gdb_flush (gdb_stdout);
2067
2068 /* Refresh prev_pc value just prior to resuming. This used to be
2069 done in stop_stepping, however, setting prev_pc there did not handle
2070 scenarios such as inferior function calls or returning from
2071 a function via the return command. In those cases, the prev_pc
2072 value was not set properly for subsequent commands. The prev_pc value
2073 is used to initialize the starting line number in the ecs. With an
2074 invalid value, the gdb next command ends up stopping at the position
2075 represented by the next line table entry past our start position.
2076 On platforms that generate one line table entry per line, this
2077 is not a problem. However, on the ia64, the compiler generates
2078 extraneous line table entries that do not increase the line number.
2079 When we issue the gdb next command on the ia64 after an inferior call
2080 or a return command, we often end up a few instructions forward, still
2081 within the original line we started.
2082
2083 An attempt was made to refresh the prev_pc at the same time the
2084 execution_control_state is initialized (for instance, just before
2085 waiting for an inferior event). But this approach did not work
2086 because of platforms that use ptrace, where the pc register cannot
2087 be read unless the inferior is stopped. At that point, we are not
2088 guaranteed the inferior is stopped and so the regcache_read_pc() call
2089 can fail. Setting the prev_pc value here ensures the value is updated
2090 correctly when the inferior is stopped. */
2091 tp->prev_pc = regcache_read_pc (get_current_regcache ());
2092
2093 /* Fill in with reasonable starting values. */
2094 init_thread_stepping_state (tp);
2095
2096 /* Reset to normal state. */
2097 init_infwait_state ();
2098
2099 /* Resume inferior. */
2100 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
2101
2102 /* Wait for it to stop (if not standalone)
2103 and in any case decode why it stopped, and act accordingly. */
2104 /* Do this only if we are not using the event loop, or if the target
2105 does not support asynchronous execution. */
2106 if (!target_can_async_p ())
2107 {
2108 wait_for_inferior (0);
2109 normal_stop ();
2110 }
2111 }
2112 \f
2113
2114 /* Start remote-debugging of a machine over a serial link. */
2115
2116 void
2117 start_remote (int from_tty)
2118 {
2119 struct inferior *inferior;
2120
2121 init_wait_for_inferior ();
2122 inferior = current_inferior ();
2123 inferior->stop_soon = STOP_QUIETLY_REMOTE;
2124
2125 /* Always go on waiting for the target, regardless of the mode. */
2126 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2127 indicate to wait_for_inferior that a target should timeout if
2128 nothing is returned (instead of just blocking). Because of this,
2129 targets expecting an immediate response need to, internally, set
2130 things up so that the target_wait() is forced to eventually
2131 timeout. */
2132 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2133 differentiate to its caller what the state of the target is after
2134 the initial open has been performed. Here we're assuming that
2135 the target has stopped. It should be possible to eventually have
2136 target_open() return to the caller an indication that the target
2137 is currently running and GDB state should be set to the same as
2138 for an async run. */
2139 wait_for_inferior (0);
2140
2141 /* Now that the inferior has stopped, do any bookkeeping like
2142 loading shared libraries. We want to do this before normal_stop,
2143 so that the displayed frame is up to date. */
2144 post_create_inferior (&current_target, from_tty);
2145
2146 normal_stop ();
2147 }
2148
2149 /* Initialize static vars when a new inferior begins. */
2150
2151 void
2152 init_wait_for_inferior (void)
2153 {
2154 /* These are meaningless until the first time through wait_for_inferior. */
2155
2156 breakpoint_init_inferior (inf_starting);
2157
2158 clear_proceed_status ();
2159
2160 stepping_past_singlestep_breakpoint = 0;
2161 deferred_step_ptid = null_ptid;
2162
2163 target_last_wait_ptid = minus_one_ptid;
2164
2165 previous_inferior_ptid = null_ptid;
2166 init_infwait_state ();
2167
2168 /* Discard any skipped inlined frames. */
2169 clear_inline_frame_state (minus_one_ptid);
2170 }
2171
2172 \f
2173 /* This enum encodes possible reasons for doing a target_wait, so that
2174 wfi can call target_wait in one place. (Ultimately the call will be
2175 moved out of the infinite loop entirely.) */
2176
2177 enum infwait_states
2178 {
2179 infwait_normal_state,
2180 infwait_thread_hop_state,
2181 infwait_step_watch_state,
2182 infwait_nonstep_watch_state
2183 };
2184
2185 /* The PTID we'll do a target_wait on.  */
2186 ptid_t waiton_ptid;
2187
2188 /* Current inferior wait state. */
2189 enum infwait_states infwait_state;
2190
2191 /* Data to be passed around while handling an event. This data is
2192 discarded between events. */
2193 struct execution_control_state
2194 {
2195 ptid_t ptid;
2196 /* The thread that got the event, if this was a thread event; NULL
2197 otherwise. */
2198 struct thread_info *event_thread;
2199
2200 struct target_waitstatus ws;
2201 int random_signal;
2202 CORE_ADDR stop_func_start;
2203 CORE_ADDR stop_func_end;
2204 char *stop_func_name;
2205 int new_thread_event;
2206 int wait_some_more;
2207 };
2208
2209 static void handle_inferior_event (struct execution_control_state *ecs);
2210
2211 static void handle_step_into_function (struct gdbarch *gdbarch,
2212 struct execution_control_state *ecs);
2213 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2214 struct execution_control_state *ecs);
2215 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2216 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2217 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2218 struct symtab_and_line sr_sal,
2219 struct frame_id sr_id);
2220 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2221
2222 static void stop_stepping (struct execution_control_state *ecs);
2223 static void prepare_to_wait (struct execution_control_state *ecs);
2224 static void keep_going (struct execution_control_state *ecs);
2225
2226 /* Callback for iterate_over_threads.  If the thread is stopped, but
2227 the user/frontend doesn't know about that yet, go through
2228 normal_stop, as if the thread had just stopped now. ARG points at
2229 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
2230 ptid_is_pid(PTID) is true, applies to all threads of the process
2231 pointed at by PTID.  Otherwise, apply only to the thread pointed
2232 at by PTID.  */
2233
2234 static int
2235 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2236 {
2237 ptid_t ptid = * (ptid_t *) arg;
2238
2239 if ((ptid_equal (info->ptid, ptid)
2240 || ptid_equal (minus_one_ptid, ptid)
2241 || (ptid_is_pid (ptid)
2242 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2243 && is_running (info->ptid)
2244 && !is_executing (info->ptid))
2245 {
2246 struct cleanup *old_chain;
2247 struct execution_control_state ecss;
2248 struct execution_control_state *ecs = &ecss;
2249
2250 memset (ecs, 0, sizeof (*ecs));
2251
2252 old_chain = make_cleanup_restore_current_thread ();
2253
2254 switch_to_thread (info->ptid);
2255
2256 /* Go through handle_inferior_event/normal_stop, so we always
2257 have consistent output as if the stop event had been
2258 reported. */
2259 ecs->ptid = info->ptid;
2260 ecs->event_thread = find_thread_ptid (info->ptid);
2261 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2262 ecs->ws.value.sig = TARGET_SIGNAL_0;
2263
2264 handle_inferior_event (ecs);
2265
2266 if (!ecs->wait_some_more)
2267 {
2268 struct thread_info *tp;
2269
2270 normal_stop ();
2271
2272 /* Finish off the continuations.  The continuations
2273 themselves are responsible for realising the thread
2274 didn't finish what it was supposed to do. */
2275 tp = inferior_thread ();
2276 do_all_intermediate_continuations_thread (tp);
2277 do_all_continuations_thread (tp);
2278 }
2279
2280 do_cleanups (old_chain);
2281 }
2282
2283 return 0;
2284 }
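 /* The PTID test above accepts an exact thread match, the
    minus_one_ptid wildcard, or any thread of the process named by a
    pid-only PTID.  The standalone sketch below restates that filter
    with simplified pid/tid pairs; all names are made up for the
    example, and the block is excluded from the build with #if 0.  */
#if 0
#include <stdio.h>

struct simple_ptid { int pid; long tid; };

/* FILTER selects one thread (pid, tid), a whole process (tid == 0,
   standing in for ptid_is_pid), or everything (pid == -1, standing in
   for minus_one_ptid).  */
static int
ptid_filter_matches (struct simple_ptid filter, struct simple_ptid thread)
{
  if (filter.pid == -1)
    return 1;
  if (filter.tid == 0)
    return filter.pid == thread.pid;
  return filter.pid == thread.pid && filter.tid == thread.tid;
}

int
main (void)
{
  struct simple_ptid all = { -1, 0 };
  struct simple_ptid proc = { 1234, 0 };
  struct simple_ptid thr = { 1234, 5678 };
  struct simple_ptid other = { 99, 1 };

  printf ("%d %d %d %d\n",
          ptid_filter_matches (all, thr),     /* 1 */
          ptid_filter_matches (proc, thr),    /* 1 */
          ptid_filter_matches (thr, thr),     /* 1 */
          ptid_filter_matches (other, thr));  /* 0 */
  return 0;
}
#endif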
2285
2286 /* This function is attached as a "thread_stop_requested" observer.
2287 Cleanup local state that assumed the PTID was to be resumed, and
2288 report the stop to the frontend. */
2289
2290 static void
2291 infrun_thread_stop_requested (ptid_t ptid)
2292 {
2293 struct displaced_step_inferior_state *displaced;
2294
2295 /* PTID was requested to stop. Remove it from the displaced
2296 stepping queue, so we don't try to resume it automatically. */
2297
2298 for (displaced = displaced_step_inferior_states;
2299 displaced;
2300 displaced = displaced->next)
2301 {
2302 struct displaced_step_request *it, **prev_next_p;
2303
2304 it = displaced->step_request_queue;
2305 prev_next_p = &displaced->step_request_queue;
2306 while (it)
2307 {
2308 if (ptid_match (it->ptid, ptid))
2309 {
2310 *prev_next_p = it->next;
2311 it->next = NULL;
2312 xfree (it);
2313 }
2314 else
2315 {
2316 prev_next_p = &it->next;
2317 }
2318
2319 it = *prev_next_p;
2320 }
2321 }
2322
2323 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2324 }
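 /* The queue pruning above uses the "pointer to the previous next
    pointer" idiom: PREV_NEXT_P always points at the link that would
    have to be rewritten, so no separate "previous node" variable is
    needed.  The standalone sketch below shows the same idiom on a
    trivial list type; the names are made up for the example and the
    block is excluded from the build with #if 0.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct node { int key; struct node *next; };

/* Remove and free every node whose key equals KEY.  */
static void
remove_matching (struct node **head, int key)
{
  struct node **prev_next_p = head;
  struct node *it = *head;

  while (it)
    {
      if (it->key == key)
        {
          *prev_next_p = it->next;  /* Unlink IT...  */
          free (it);                /* ...and release it.  */
        }
      else
        prev_next_p = &it->next;    /* Keep IT; advance the link.  */

      it = *prev_next_p;
    }
}

int
main (void)
{
  struct node *head = NULL, *p;
  int keys[] = { 1, 2, 1, 3 };
  int i;

  /* Push the keys at the head, producing the list 3 -> 1 -> 2 -> 1.  */
  for (i = 0; i < 4; i++)
    {
      p = malloc (sizeof *p);
      p->key = keys[i];
      p->next = head;
      head = p;
    }

  remove_matching (&head, 1);

  for (p = head; p != NULL; p = p->next)
    printf ("%d ", p->key);  /* Prints "3 2 ".  */
  printf ("\n");
  return 0;
}
#endif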
2325
2326 static void
2327 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2328 {
2329 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2330 nullify_last_target_wait_ptid ();
2331 }
2332
2333 /* Callback for iterate_over_threads. */
2334
2335 static int
2336 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2337 {
2338 if (is_exited (info->ptid))
2339 return 0;
2340
2341 delete_step_resume_breakpoint (info);
2342 return 0;
2343 }
2344
2345 /* In all-stop, delete the step resume breakpoint of any thread that
2346 had one. In non-stop, delete the step resume breakpoint of the
2347 thread that just stopped. */
2348
2349 static void
2350 delete_step_thread_step_resume_breakpoint (void)
2351 {
2352 if (!target_has_execution
2353 || ptid_equal (inferior_ptid, null_ptid))
2354 /* If the inferior has exited, we have already deleted the step
2355 resume breakpoints out of GDB's lists. */
2356 return;
2357
2358 if (non_stop)
2359 {
2360 /* If in non-stop mode, only delete the step-resume or
2361 longjmp-resume breakpoint of the thread that just stopped
2362 stepping. */
2363 struct thread_info *tp = inferior_thread ();
2364
2365 delete_step_resume_breakpoint (tp);
2366 }
2367 else
2368 /* In all-stop mode, delete all step-resume and longjmp-resume
2369 breakpoints of any thread that had them. */
2370 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2371 }
2372
2373 /* A cleanup wrapper. */
2374
2375 static void
2376 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2377 {
2378 delete_step_thread_step_resume_breakpoint ();
2379 }
2380
2381 /* Pretty print the results of target_wait, for debugging purposes. */
2382
2383 static void
2384 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2385 const struct target_waitstatus *ws)
2386 {
2387 char *status_string = target_waitstatus_to_string (ws);
2388 struct ui_file *tmp_stream = mem_fileopen ();
2389 char *text;
2390
2391 /* The text is split over several lines because it was getting too long.
2392 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2393 output as a unit; we want only one timestamp printed if debug_timestamp
2394 is set. */
2395
2396 fprintf_unfiltered (tmp_stream,
2397 "infrun: target_wait (%d", PIDGET (waiton_ptid));
2398 if (PIDGET (waiton_ptid) != -1)
2399 fprintf_unfiltered (tmp_stream,
2400 " [%s]", target_pid_to_str (waiton_ptid));
2401 fprintf_unfiltered (tmp_stream, ", status) =\n");
2402 fprintf_unfiltered (tmp_stream,
2403 "infrun: %d [%s],\n",
2404 PIDGET (result_ptid), target_pid_to_str (result_ptid));
2405 fprintf_unfiltered (tmp_stream,
2406 "infrun: %s\n",
2407 status_string);
2408
2409 text = ui_file_xstrdup (tmp_stream, NULL);
2410
2411 /* This uses %s in part to handle %'s in the text, but also to avoid
2412 a gcc error: the format attribute requires a string literal. */
2413 fprintf_unfiltered (gdb_stdlog, "%s", text);
2414
2415 xfree (status_string);
2416 xfree (text);
2417 ui_file_delete (tmp_stream);
2418 }
2419
2420 /* Prepare and stabilize the inferior for detaching it. E.g.,
2421 detaching while a thread is displaced stepping is a recipe for
2422 crashing it, as nothing would readjust the PC out of the scratch
2423 pad. */
2424
2425 void
2426 prepare_for_detach (void)
2427 {
2428 struct inferior *inf = current_inferior ();
2429 ptid_t pid_ptid = pid_to_ptid (inf->pid);
2430 struct cleanup *old_chain_1;
2431 struct displaced_step_inferior_state *displaced;
2432
2433 displaced = get_displaced_stepping_state (inf->pid);
2434
2435 /* Is any thread of this process displaced stepping? If not,
2436 there's nothing else to do. */
2437 if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2438 return;
2439
2440 if (debug_infrun)
2441 fprintf_unfiltered (gdb_stdlog,
2442 "displaced-stepping in-process while detaching");
2443
2444 old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2445 inf->detaching = 1;
2446
2447 while (!ptid_equal (displaced->step_ptid, null_ptid))
2448 {
2449 struct cleanup *old_chain_2;
2450 struct execution_control_state ecss;
2451 struct execution_control_state *ecs;
2452
2453 ecs = &ecss;
2454 memset (ecs, 0, sizeof (*ecs));
2455
2456 overlay_cache_invalid = 1;
2457
2458 /* We have to invalidate the registers BEFORE calling
2459 target_wait because they can be loaded from the target while
2460 in target_wait. This makes remote debugging a bit more
2461 efficient for those targets that provide critical registers
2462 as part of their normal status mechanism. */
2463
2464 registers_changed ();
2465
2466 if (deprecated_target_wait_hook)
2467 ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2468 else
2469 ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2470
2471 if (debug_infrun)
2472 print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2473
2474 /* If an error happens while handling the event, propagate GDB's
2475 knowledge of the executing state to the frontend/user running
2476 state. */
2477 old_chain_2 = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2478
2479 /* In non-stop mode, each thread is handled individually.
2480 Switch early, so the global state is set correctly for this
2481 thread. */
2482 if (non_stop
2483 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2484 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2485 context_switch (ecs->ptid);
2486
2487 /* Now figure out what to do with the result of the wait.  */
2488 handle_inferior_event (ecs);
2489
2490 /* No error, don't finish the state yet. */
2491 discard_cleanups (old_chain_2);
2492
2493 /* Breakpoints and watchpoints are not installed on the target
2494 at this point, and signals are passed directly to the
2495 inferior, so this must mean the process is gone. */
2496 if (!ecs->wait_some_more)
2497 {
2498 discard_cleanups (old_chain_1);
2499 error (_("Program exited while detaching"));
2500 }
2501 }
2502
2503 discard_cleanups (old_chain_1);
2504 }
2505
2506 /* Wait for control to return from inferior to debugger.
2507
2508 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2509 as if they were SIGTRAP signals. This can be useful during
2510 the startup sequence on some targets such as HP/UX, where
2511 we receive an EXEC event instead of the expected SIGTRAP.
2512
2513 If the inferior gets a signal, we may decide to start it up again
2514 instead of returning.  That is why there is a loop in this function.
2515 When this function actually returns, it means the inferior
2516 should be left stopped and GDB should read more commands. */
2517
2518 void
2519 wait_for_inferior (int treat_exec_as_sigtrap)
2520 {
2521 struct cleanup *old_cleanups;
2522 struct execution_control_state ecss;
2523 struct execution_control_state *ecs;
2524
2525 if (debug_infrun)
2526 fprintf_unfiltered
2527 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2528 treat_exec_as_sigtrap);
2529
2530 old_cleanups =
2531 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2532
2533 ecs = &ecss;
2534 memset (ecs, 0, sizeof (*ecs));
2535
2536 /* We'll update this if & when we switch to a new thread. */
2537 previous_inferior_ptid = inferior_ptid;
2538
2539 while (1)
2540 {
2541 struct cleanup *old_chain;
2542
2543 /* We have to invalidate the registers BEFORE calling target_wait
2544 because they can be loaded from the target while in target_wait.
2545 This makes remote debugging a bit more efficient for those
2546 targets that provide critical registers as part of their normal
2547 status mechanism. */
2548
2549 overlay_cache_invalid = 1;
2550 registers_changed ();
2551
2552 if (deprecated_target_wait_hook)
2553 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2554 else
2555 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2556
2557 if (debug_infrun)
2558 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2559
2560 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2561 {
2562 xfree (ecs->ws.value.execd_pathname);
2563 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2564 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2565 }
2566
2567 /* If an error happens while handling the event, propagate GDB's
2568 knowledge of the executing state to the frontend/user running
2569 state. */
2570 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2571
2572 if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2573 || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2574 ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2575
2576 /* Now figure out what to do with the result of the wait.  */
2577 handle_inferior_event (ecs);
2578
2579 /* No error, don't finish the state yet. */
2580 discard_cleanups (old_chain);
2581
2582 if (!ecs->wait_some_more)
2583 break;
2584 }
2585
2586 do_cleanups (old_cleanups);
2587 }
2588
2589 /* Asynchronous version of wait_for_inferior. It is called by the
2590 event loop whenever a change of state is detected on the file
2591 descriptor corresponding to the target. It can be called more than
2592 once to complete a single execution command. In such cases we need
2593 to keep the state in a global variable ECSS. If it is the last time
2594 that this function is called for a single execution command, then
2595 report to the user that the inferior has stopped, and do the
2596 necessary cleanups. */
2597
2598 void
2599 fetch_inferior_event (void *client_data)
2600 {
2601 struct execution_control_state ecss;
2602 struct execution_control_state *ecs = &ecss;
2603 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2604 struct cleanup *ts_old_chain;
2605 int was_sync = sync_execution;
2606
2607 memset (ecs, 0, sizeof (*ecs));
2608
2609 /* We'll update this if & when we switch to a new thread. */
2610 previous_inferior_ptid = inferior_ptid;
2611
2612 if (non_stop)
2613 /* In non-stop mode, the user/frontend should not notice a thread
2614 switch due to internal events.  Make sure we revert to the
2615 user-selected thread and frame after handling the event and
2616 running any breakpoint commands. */
2617 make_cleanup_restore_current_thread ();
2618
2619 /* We have to invalidate the registers BEFORE calling target_wait
2620 because they can be loaded from the target while in target_wait.
2621 This makes remote debugging a bit more efficient for those
2622 targets that provide critical registers as part of their normal
2623 status mechanism. */
2624
2625 overlay_cache_invalid = 1;
2626 registers_changed ();
2627
2628 if (deprecated_target_wait_hook)
2629 ecs->ptid =
2630 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2631 else
2632 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2633
2634 if (debug_infrun)
2635 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2636
2637 if (non_stop
2638 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2639 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2640 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2641 /* In non-stop mode, each thread is handled individually. Switch
2642 early, so the global state is set correctly for this
2643 thread. */
2644 context_switch (ecs->ptid);
2645
2646 /* If an error happens while handling the event, propagate GDB's
2647 knowledge of the executing state to the frontend/user running
2648 state. */
2649 if (!non_stop)
2650 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2651 else
2652 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2653
2654 /* Now figure out what to do with the result of the wait.  */
2655 handle_inferior_event (ecs);
2656
2657 if (!ecs->wait_some_more)
2658 {
2659 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2660
2661 delete_step_thread_step_resume_breakpoint ();
2662
2663 /* We may not find an inferior if this was a process exit. */
2664 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2665 normal_stop ();
2666
2667 if (target_has_execution
2668 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2669 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2670 && ecs->event_thread->step_multi
2671 && ecs->event_thread->stop_step)
2672 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2673 else
2674 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2675 }
2676
2677 /* No error, don't finish the thread states yet. */
2678 discard_cleanups (ts_old_chain);
2679
2680 /* Revert thread and frame. */
2681 do_cleanups (old_chain);
2682
2683 /* If the inferior was in sync execution mode, and now isn't,
2684 restore the prompt. */
2685 if (was_sync && !sync_execution)
2686 display_gdb_prompt (0);
2687 }
2688
2689 /* Record the frame and location we're currently stepping through. */
2690 void
2691 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2692 {
2693 struct thread_info *tp = inferior_thread ();
2694
2695 tp->step_frame_id = get_frame_id (frame);
2696 tp->step_stack_frame_id = get_stack_frame_id (frame);
2697
2698 tp->current_symtab = sal.symtab;
2699 tp->current_line = sal.line;
2700 }
2701
2702 /* Clear context switchable stepping state. */
2703
2704 void
2705 init_thread_stepping_state (struct thread_info *tss)
2706 {
2707 tss->stepping_over_breakpoint = 0;
2708 tss->step_after_step_resume_breakpoint = 0;
2709 tss->stepping_through_solib_after_catch = 0;
2710 tss->stepping_through_solib_catchpoints = NULL;
2711 }
2712
2713 /* Return the cached copy of the last pid/waitstatus returned by
2714 target_wait()/deprecated_target_wait_hook(). The data is actually
2715 cached by handle_inferior_event(), which gets called immediately
2716 after target_wait()/deprecated_target_wait_hook(). */
2717
2718 void
2719 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2720 {
2721 *ptidp = target_last_wait_ptid;
2722 *status = target_last_waitstatus;
2723 }
2724
2725 void
2726 nullify_last_target_wait_ptid (void)
2727 {
2728 target_last_wait_ptid = minus_one_ptid;
2729 }
2730
2731 /* Switch thread contexts. */
2732
2733 static void
2734 context_switch (ptid_t ptid)
2735 {
2736 if (debug_infrun)
2737 {
2738 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2739 target_pid_to_str (inferior_ptid));
2740 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2741 target_pid_to_str (ptid));
2742 }
2743
2744 switch_to_thread (ptid);
2745 }
2746
2747 static void
2748 adjust_pc_after_break (struct execution_control_state *ecs)
2749 {
2750 struct regcache *regcache;
2751 struct gdbarch *gdbarch;
2752 struct address_space *aspace;
2753 CORE_ADDR breakpoint_pc;
2754
2755 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2756 we aren't, just return.
2757
2758 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2759 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2760 implemented by software breakpoints should be handled through the normal
2761 breakpoint layer.
2762
2763 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2764 different signals (SIGILL or SIGEMT for instance), but it is less
2765 clear where the PC is pointing afterwards. It may not match
2766 gdbarch_decr_pc_after_break. I don't know any specific target that
2767 generates these signals at breakpoints (the code has been in GDB since at
2768 least 1992) so I can not guess how to handle them here.
2769
2770 In earlier versions of GDB, a target with
2771 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2772 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2773 target with both of these set in GDB history, and it seems unlikely to be
2774 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2775
2776 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2777 return;
2778
2779 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2780 return;
2781
2782 /* In reverse execution, when a breakpoint is hit, the instruction
2783 under it has already been de-executed. The reported PC always
2784 points at the breakpoint address, so adjusting it further would
2785 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2786 architecture:
2787
2788 B1 0x08000000 : INSN1
2789 B2 0x08000001 : INSN2
2790 0x08000002 : INSN3
2791 PC -> 0x08000003 : INSN4
2792
2793 Say you're stopped at 0x08000003 as above. Reverse continuing
2794 from that point should hit B2 as below. Reading the PC when the
2795 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2796 been de-executed already.
2797
2798 B1 0x08000000 : INSN1
2799 B2 PC -> 0x08000001 : INSN2
2800 0x08000002 : INSN3
2801 0x08000003 : INSN4
2802
2803 We can't apply the same logic as for forward execution, because
2804 we would wrongly adjust the PC to 0x08000000, since there's a
2805 breakpoint at PC - 1. We'd then report a hit on B1, although
2806 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2807 behaviour. */
2808 if (execution_direction == EXEC_REVERSE)
2809 return;
2810
2811 /* If this target does not decrement the PC after breakpoints, then
2812 we have nothing to do. */
2813 regcache = get_thread_regcache (ecs->ptid);
2814 gdbarch = get_regcache_arch (regcache);
2815 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2816 return;
2817
2818 aspace = get_regcache_aspace (regcache);
2819
2820 /* Find the location where (if we've hit a breakpoint) the
2821 breakpoint would be. */
2822 breakpoint_pc = regcache_read_pc (regcache)
2823 - gdbarch_decr_pc_after_break (gdbarch);
2824
2825 /* Check whether there actually is a software breakpoint inserted at
2826 that location.
2827
2828 If in non-stop mode, a race condition is possible where we've
2829 removed a breakpoint, but stop events for that breakpoint were
2830 already queued and arrive later. To suppress those spurious
2831 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2832 and retire them after a number of stop events are reported. */
2833 if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2834 || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2835 {
2836 struct cleanup *old_cleanups = NULL;
2837
2838 if (RECORD_IS_USED)
2839 old_cleanups = record_gdb_operation_disable_set ();
2840
2841 /* When using hardware single-step, a SIGTRAP is reported for both
2842 a completed single-step and a software breakpoint. Need to
2843 differentiate between the two, as the latter needs adjusting
2844 but the former does not.
2845
2846 The SIGTRAP can be due to a completed hardware single-step only if
2847 - we didn't insert software single-step breakpoints
2848 - the thread to be examined is still the current thread
2849 - this thread is currently being stepped
2850
2851 If any of these events did not occur, we must have stopped due
2852 to hitting a software breakpoint, and have to back up to the
2853 breakpoint address.
2854
2855 As a special case, we could have hardware single-stepped a
2856 software breakpoint. In this case (prev_pc == breakpoint_pc),
2857 we also need to back up to the breakpoint address. */
2858
2859 if (singlestep_breakpoints_inserted_p
2860 || !ptid_equal (ecs->ptid, inferior_ptid)
2861 || !currently_stepping (ecs->event_thread)
2862 || ecs->event_thread->prev_pc == breakpoint_pc)
2863 regcache_write_pc (regcache, breakpoint_pc);
2864
2865 if (RECORD_IS_USED)
2866 do_cleanups (old_cleanups);
2867 }
2868 }
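 /* A concrete illustration of the rewind above: on an architecture
    whose software breakpoint instruction is one byte and which reports
    the PC just past it (gdbarch_decr_pc_after_break == 1, as on x86), a
    breakpoint planted at 0x08000000 is reported with PC == 0x08000001,
    and the code above moves the PC back so the stop is attributed to
    the breakpoint address.  The standalone sketch below shows just that
    arithmetic; the concrete values are assumptions for the example, and
    the block is excluded from the build with #if 0.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long reported_pc = 0x08000001UL;  /* PC as read after the trap.  */
  unsigned long decr_pc_after_break = 1;     /* e.g. a one-byte "int3".  */

  /* Where a breakpoint would have to be for this SIGTRAP to be a
     breakpoint hit rather than a completed single-step.  */
  unsigned long breakpoint_pc = reported_pc - decr_pc_after_break;

  printf ("breakpoint_pc = 0x%lx\n", breakpoint_pc);  /* 0x8000000  */
  return 0;
}
#endif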
2869
2870 void
2871 init_infwait_state (void)
2872 {
2873 waiton_ptid = pid_to_ptid (-1);
2874 infwait_state = infwait_normal_state;
2875 }
2876
2877 void
2878 error_is_running (void)
2879 {
2880 error (_("\
2881 Cannot execute this command while the selected thread is running."));
2882 }
2883
2884 void
2885 ensure_not_running (void)
2886 {
2887 if (is_running (inferior_ptid))
2888 error_is_running ();
2889 }
2890
2891 static int
2892 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2893 {
2894 for (frame = get_prev_frame (frame);
2895 frame != NULL;
2896 frame = get_prev_frame (frame))
2897 {
2898 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2899 return 1;
2900 if (get_frame_type (frame) != INLINE_FRAME)
2901 break;
2902 }
2903
2904 return 0;
2905 }
2906
2907 /* Auxiliary function that handles syscall entry/return events.
2908 It returns 1 if the inferior should keep going (and GDB
2909 should ignore the event), or 0 if the event deserves to be
2910 processed. */
2911
2912 static int
2913 handle_syscall_event (struct execution_control_state *ecs)
2914 {
2915 struct regcache *regcache;
2916 struct gdbarch *gdbarch;
2917 int syscall_number;
2918
2919 if (!ptid_equal (ecs->ptid, inferior_ptid))
2920 context_switch (ecs->ptid);
2921
2922 regcache = get_thread_regcache (ecs->ptid);
2923 gdbarch = get_regcache_arch (regcache);
2924 syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
2925 stop_pc = regcache_read_pc (regcache);
2926
2927 target_last_waitstatus.value.syscall_number = syscall_number;
2928
2929 if (catch_syscall_enabled () > 0
2930 && catching_syscall_number (syscall_number) > 0)
2931 {
2932 if (debug_infrun)
2933 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
2934 syscall_number);
2935
2936 ecs->event_thread->stop_bpstat
2937 = bpstat_stop_status (get_regcache_aspace (regcache),
2938 stop_pc, ecs->ptid);
2939 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2940
2941 if (!ecs->random_signal)
2942 {
2943 /* Catchpoint hit. */
2944 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2945 return 0;
2946 }
2947 }
2948
2949 /* If no catchpoint triggered for this, then keep going. */
2950 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2951 keep_going (ecs);
2952 return 1;
2953 }
2954
2955 /* Given an execution control state that has been freshly filled in
2956 by an event from the inferior, figure out what it means and take
2957 appropriate action. */
2958
2959 static void
2960 handle_inferior_event (struct execution_control_state *ecs)
2961 {
2962 struct frame_info *frame;
2963 struct gdbarch *gdbarch;
2964 int sw_single_step_trap_p = 0;
2965 int stopped_by_watchpoint;
2966 int stepped_after_stopped_by_watchpoint = 0;
2967 struct symtab_and_line stop_pc_sal;
2968 enum stop_kind stop_soon;
2969
2970 if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
2971 {
2972 /* We had an event in the inferior, but we are not interested in
2973 handling it at this level. The lower layers have already
2974 done what needs to be done, if anything.
2975
2976 One of the possible circumstances for this is when the
2977 inferior produces output for the console. The inferior has
2978 not stopped, and we are ignoring the event. Another possible
2979 circumstance is any event which the lower level knows will be
2980 reported multiple times without an intervening resume. */
2981 if (debug_infrun)
2982 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2983 prepare_to_wait (ecs);
2984 return;
2985 }
2986
2987 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2988 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2989 {
2990 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2991
2992 gdb_assert (inf);
2993 stop_soon = inf->stop_soon;
2994 }
2995 else
2996 stop_soon = NO_STOP_QUIETLY;
2997
2998 /* Cache the last pid/waitstatus. */
2999 target_last_wait_ptid = ecs->ptid;
3000 target_last_waitstatus = ecs->ws;
3001
3002 /* Always clear state belonging to the previous time we stopped. */
3003 stop_stack_dummy = STOP_NONE;
3004
3005 /* If it's a new process, add it to the thread database */
3006
3007 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
3008 && !ptid_equal (ecs->ptid, minus_one_ptid)
3009 && !in_thread_list (ecs->ptid));
3010
3011 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
3012 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
3013 add_thread (ecs->ptid);
3014
3015 ecs->event_thread = find_thread_ptid (ecs->ptid);
3016
3017 /* Dependent on valid ECS->EVENT_THREAD. */
3018 adjust_pc_after_break (ecs);
3019
3020 /* Dependent on the current PC value modified by adjust_pc_after_break. */
3021 reinit_frame_cache ();
3022
3023 breakpoint_retire_moribund ();
3024
3025 /* First, distinguish signals caused by the debugger from signals
3026 that have to do with the program's own actions. Note that
3027 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3028 on the operating system version. Here we detect when a SIGILL or
3029 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3030 something similar for SIGSEGV, since a SIGSEGV will be generated
3031 when we're trying to execute a breakpoint instruction on a
3032 non-executable stack. This happens for call dummy breakpoints
3033 for architectures like SPARC that place call dummies on the
3034 stack. */
3035 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
3036 && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
3037 || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
3038 || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
3039 {
3040 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3041
3042 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
3043 regcache_read_pc (regcache)))
3044 {
3045 if (debug_infrun)
3046 fprintf_unfiltered (gdb_stdlog,
3047 "infrun: Treating signal as SIGTRAP\n");
3048 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
3049 }
3050 }
3051
3052 /* Mark the non-executing threads accordingly. In all-stop, all
3053 threads of all processes are stopped when we get any event
3054 reported. In non-stop mode, only the event thread stops. If
3055 we're handling a process exit in non-stop mode, there's nothing
3056 to do, as threads of the dead process are gone, and threads of
3057 any other process were left running. */
3058 if (!non_stop)
3059 set_executing (minus_one_ptid, 0);
3060 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
3061 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
3062 set_executing (inferior_ptid, 0);
3063
3064 switch (infwait_state)
3065 {
3066 case infwait_thread_hop_state:
3067 if (debug_infrun)
3068 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
3069 break;
3070
3071 case infwait_normal_state:
3072 if (debug_infrun)
3073 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
3074 break;
3075
3076 case infwait_step_watch_state:
3077 if (debug_infrun)
3078 fprintf_unfiltered (gdb_stdlog,
3079 "infrun: infwait_step_watch_state\n");
3080
3081 stepped_after_stopped_by_watchpoint = 1;
3082 break;
3083
3084 case infwait_nonstep_watch_state:
3085 if (debug_infrun)
3086 fprintf_unfiltered (gdb_stdlog,
3087 "infrun: infwait_nonstep_watch_state\n");
3088 insert_breakpoints ();
3089
3090 /* FIXME-maybe: is this cleaner than setting a flag? Does it
3091 handle things like signals arriving and other things happening
3092 in combination correctly? */
3093 stepped_after_stopped_by_watchpoint = 1;
3094 break;
3095
3096 default:
3097 internal_error (__FILE__, __LINE__, _("bad switch"));
3098 }
3099
3100 infwait_state = infwait_normal_state;
3101 waiton_ptid = pid_to_ptid (-1);
3102
3103 switch (ecs->ws.kind)
3104 {
3105 case TARGET_WAITKIND_LOADED:
3106 if (debug_infrun)
3107 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
3108 /* Ignore gracefully during startup of the inferior, as it might
3109 be the shell which has just loaded some objects; otherwise,
3110 add the symbols for the newly loaded objects. Also ignore at
3111 the beginning of an attach or remote session; we will query
3112 the full list of libraries once the connection is
3113 established. */
3114 if (stop_soon == NO_STOP_QUIETLY)
3115 {
3116 /* Check for any newly added shared libraries if we're
3117 supposed to be adding them automatically. Switch
3118 terminal for any messages produced by
3119 breakpoint_re_set. */
3120 target_terminal_ours_for_output ();
3121 /* NOTE: cagney/2003-11-25: Make certain that the target
3122 stack's section table is kept up-to-date. Architectures,
3123 (e.g., PPC64), use the section table to perform
3124 operations such as address => section name and hence
3125 require the table to contain all sections (including
3126 those found in shared libraries). */
3127 #ifdef SOLIB_ADD
3128 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3129 #else
3130 solib_add (NULL, 0, &current_target, auto_solib_add);
3131 #endif
3132 target_terminal_inferior ();
3133
3134 /* If requested, stop when the dynamic linker notifies
3135 gdb of events. This allows the user to get control
3136 and place breakpoints in initializer routines for
3137 dynamically loaded objects (among other things). */
3138 if (stop_on_solib_events)
3139 {
3140 /* Make sure we print "Stopped due to solib-event" in
3141 normal_stop. */
3142 stop_print_frame = 1;
3143
3144 stop_stepping (ecs);
3145 return;
3146 }
3147
3148 /* NOTE drow/2007-05-11: This might be a good place to check
3149 for "catch load". */
3150 }
3151
3152 /* If we are skipping through a shell, or through shared library
3153 loading that we aren't interested in, resume the program. If
3154 we're running the program normally, also resume. But stop if
3155 we're attaching or setting up a remote connection. */
3156 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
3157 {
3158 /* Loading of shared libraries might have changed breakpoint
3159 addresses. Make sure new breakpoints are inserted. */
3160 if (stop_soon == NO_STOP_QUIETLY
3161 && !breakpoints_always_inserted_mode ())
3162 insert_breakpoints ();
3163 resume (0, TARGET_SIGNAL_0);
3164 prepare_to_wait (ecs);
3165 return;
3166 }
3167
3168 break;
3169
3170 case TARGET_WAITKIND_SPURIOUS:
3171 if (debug_infrun)
3172 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
3173 resume (0, TARGET_SIGNAL_0);
3174 prepare_to_wait (ecs);
3175 return;
3176
3177 case TARGET_WAITKIND_EXITED:
3178 if (debug_infrun)
3179 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
3180 inferior_ptid = ecs->ptid;
3181 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3182 set_current_program_space (current_inferior ()->pspace);
3183 handle_vfork_child_exec_or_exit (0);
3184 target_terminal_ours (); /* Must do this before mourn anyway */
3185 print_exited_reason (ecs->ws.value.integer);
3186
3187 /* Record the exit code in the convenience variable $_exitcode, so
3188 that the user can inspect this again later. */
3189 set_internalvar_integer (lookup_internalvar ("_exitcode"),
3190 (LONGEST) ecs->ws.value.integer);
3191 gdb_flush (gdb_stdout);
3192 target_mourn_inferior ();
3193 singlestep_breakpoints_inserted_p = 0;
3194 cancel_single_step_breakpoints ();
3195 stop_print_frame = 0;
3196 stop_stepping (ecs);
3197 return;
3198
3199 case TARGET_WAITKIND_SIGNALLED:
3200 if (debug_infrun)
3201 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
3202 inferior_ptid = ecs->ptid;
3203 set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
3204 set_current_program_space (current_inferior ()->pspace);
3205 handle_vfork_child_exec_or_exit (0);
3206 stop_print_frame = 0;
3207 target_terminal_ours (); /* Must do this before mourn anyway */
3208
3209 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
3210 reach here unless the inferior is dead. However, for years
3211 target_kill() was called here, which hints that fatal signals aren't
3212 really fatal on some systems. If that's true, then some changes
3213 may be needed. */
3214 target_mourn_inferior ();
3215
3216 print_signal_exited_reason (ecs->ws.value.sig);
3217 singlestep_breakpoints_inserted_p = 0;
3218 cancel_single_step_breakpoints ();
3219 stop_stepping (ecs);
3220 return;
3221
3222 /* The following are the only cases in which we keep going;
3223 the above cases end in a continue or goto. */
3224 case TARGET_WAITKIND_FORKED:
3225 case TARGET_WAITKIND_VFORKED:
3226 if (debug_infrun)
3227 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
3228
3229 if (!ptid_equal (ecs->ptid, inferior_ptid))
3230 {
3231 context_switch (ecs->ptid);
3232 reinit_frame_cache ();
3233 }
3234
3235 /* Immediately detach breakpoints from the child before there's
3236 any chance of letting the user delete breakpoints from the
3237 breakpoint lists. If we don't do this early, it's easy to
3238 leave leftover traps in the child, viz.: "break foo; catch
3239 fork; c; <fork>; del; c; <child calls foo>". We only follow
3240 the fork on the last `continue', and by that time the
3241 breakpoint at "foo" is long gone from the breakpoint table.
3242 If we vforked, then we don't need to unpatch here, since both
3243 parent and child are sharing the same memory pages; we'll
3244 need to unpatch at follow/detach time instead to be certain
3245 that new breakpoints added between catchpoint hit time and
3246 vfork follow are detached. */
3247 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
3248 {
3249 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
3250
3251 /* This won't actually modify the breakpoint list, but will
3252 physically remove the breakpoints from the child. */
3253 detach_breakpoints (child_pid);
3254 }
3255
3256 if (singlestep_breakpoints_inserted_p)
3257 {
3258 /* Pull the single step breakpoints out of the target. */
3259 remove_single_step_breakpoints ();
3260 singlestep_breakpoints_inserted_p = 0;
3261 }
3262
3263 /* In case the event is caught by a catchpoint, remember that
3264 the event is to be followed at the next resume of the thread,
3265 and not immediately. */
3266 ecs->event_thread->pending_follow = ecs->ws;
3267
3268 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3269
3270 ecs->event_thread->stop_bpstat
3271 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3272 stop_pc, ecs->ptid);
3273
3274 /* Note that we're interested in knowing whether the bpstat
3275 actually causes a stop, not just whether it may explain the signal.
3276 Software watchpoints, for example, always appear in the
3277 bpstat. */
3278 ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);
3279
3280 /* If no catchpoint triggered for this, then keep going. */
3281 if (ecs->random_signal)
3282 {
3283 ptid_t parent;
3284 ptid_t child;
3285 int should_resume;
3286 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
3287
3288 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3289
3290 should_resume = follow_fork ();
3291
3292 parent = ecs->ptid;
3293 child = ecs->ws.value.related_pid;
3294
3295 /* In non-stop mode, also resume the other branch. */
3296 if (non_stop && !detach_fork)
3297 {
3298 if (follow_child)
3299 switch_to_thread (parent);
3300 else
3301 switch_to_thread (child);
3302
3303 ecs->event_thread = inferior_thread ();
3304 ecs->ptid = inferior_ptid;
3305 keep_going (ecs);
3306 }
3307
3308 if (follow_child)
3309 switch_to_thread (child);
3310 else
3311 switch_to_thread (parent);
3312
3313 ecs->event_thread = inferior_thread ();
3314 ecs->ptid = inferior_ptid;
3315
3316 if (should_resume)
3317 keep_going (ecs);
3318 else
3319 stop_stepping (ecs);
3320 return;
3321 }
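 /* Illustrative note (editorial): the branch above is driven by the
    user-level fork-following settings, for example:

        (gdb) set follow-fork-mode child    # follow_fork_mode_string
        (gdb) set detach-on-fork off        # !detach_fork: keep both sides
        (gdb) catch fork                    # make the event stop, not random

    With detach-on-fork off in non-stop mode, the code above resumes both
    the parent and the child; otherwise only the followed side keeps
    going.  */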
3322 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3323 goto process_event_stop_test;
3324
3325 case TARGET_WAITKIND_VFORK_DONE:
3326 /* Done with the shared memory region. Re-insert breakpoints in
3327 the parent, and keep going. */
3328
3329 if (debug_infrun)
3330 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");
3331
3332 if (!ptid_equal (ecs->ptid, inferior_ptid))
3333 context_switch (ecs->ptid);
3334
3335 current_inferior ()->waiting_for_vfork_done = 0;
3336 current_inferior ()->pspace->breakpoints_not_allowed = 0;
3337 /* This also takes care of reinserting breakpoints in the
3338 previously locked inferior. */
3339 keep_going (ecs);
3340 return;
3341
3342 case TARGET_WAITKIND_EXECD:
3343 if (debug_infrun)
3344 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
3345
3346 if (!ptid_equal (ecs->ptid, inferior_ptid))
3347 {
3348 context_switch (ecs->ptid);
3349 reinit_frame_cache ();
3350 }
3351
3352 singlestep_breakpoints_inserted_p = 0;
3353 cancel_single_step_breakpoints ();
3354
3355 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3356
3357 /* Do whatever is necessary to the parent branch of the vfork. */
3358 handle_vfork_child_exec_or_exit (1);
3359
3360 /* This causes the eventpoints and symbol table to be reset.
3361 Must do this now, before trying to determine whether to
3362 stop. */
3363 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
3364
3365 ecs->event_thread->stop_bpstat
3366 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3367 stop_pc, ecs->ptid);
3368 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3369
3370 /* Note that this may be referenced from inside
3371 bpstat_stop_status above, through inferior_has_execd. */
3372 xfree (ecs->ws.value.execd_pathname);
3373 ecs->ws.value.execd_pathname = NULL;
3374
3375 /* If no catchpoint triggered for this, then keep going. */
3376 if (ecs->random_signal)
3377 {
3378 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3379 keep_going (ecs);
3380 return;
3381 }
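 /* Illustrative note (editorial): as with fork above, the only way an
    exec event stops the user here is through a catchpoint, e.g.:

        (gdb) catch exec
        (gdb) continue          # stops when the inferior calls exec

    Without such a catchpoint, ecs->random_signal is set above and we
    simply keep going in the freshly exec'd program.  */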
3382 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3383 goto process_event_stop_test;
3384
3385 /* Be careful not to try to gather much state about a thread
3386 that's in a syscall. It's frequently a losing proposition. */
3387 case TARGET_WAITKIND_SYSCALL_ENTRY:
3388 if (debug_infrun)
3389 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
3390 /* Get the current syscall number.  */
3391 if (handle_syscall_event (ecs) != 0)
3392 return;
3393 goto process_event_stop_test;
3394
3395 /* Before examining the threads further, step this thread to
3396 get it entirely out of the syscall. (We get notice of the
3397 event when the thread is just on the verge of exiting a
3398 syscall. Stepping one instruction seems to get it back
3399 into user code.) */
3400 case TARGET_WAITKIND_SYSCALL_RETURN:
3401 if (debug_infrun)
3402 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
3403 if (handle_syscall_event (ecs) != 0)
3404 return;
3405 goto process_event_stop_test;
3406
3407 case TARGET_WAITKIND_STOPPED:
3408 if (debug_infrun)
3409 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
3410 ecs->event_thread->stop_signal = ecs->ws.value.sig;
3411 break;
3412
3413 case TARGET_WAITKIND_NO_HISTORY:
3414 /* Reverse execution: target ran out of history info. */
3415 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3416 print_no_history_reason ();
3417 stop_stepping (ecs);
3418 return;
3419 }
3420
3421 if (ecs->new_thread_event)
3422 {
3423 if (non_stop)
3424 /* Non-stop assumes that the target handles adding new threads
3425 to the thread list. */
3426 internal_error (__FILE__, __LINE__, "\
3427 targets should add new threads to the thread list themselves in non-stop mode.");
3428
3429 /* We may want to consider not doing a resume here in order to
3430 give the user a chance to play with the new thread. It might
3431 be good to make that a user-settable option. */
3432
3433 /* At this point, all threads are stopped (happens automatically
3434 in either the OS or the native code). Therefore we need to
3435 continue all threads in order to make progress. */
3436
3437 if (!ptid_equal (ecs->ptid, inferior_ptid))
3438 context_switch (ecs->ptid);
3439 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
3440 prepare_to_wait (ecs);
3441 return;
3442 }
3443
3444 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
3445 {
3446 /* Do we need to clean up the state of a thread that has
3447 completed a displaced single-step? (Doing so usually affects
3448 the PC, so do it here, before we set stop_pc.) */
3449 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
3450
3451 /* If we either finished a single-step or hit a breakpoint, but
3452 the user wanted this thread to be stopped, pretend we got a
3453 SIG0 (generic unsignaled stop). */
3454
3455 if (ecs->event_thread->stop_requested
3456 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3457 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3458 }
3459
3460 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
3461
3462 if (debug_infrun)
3463 {
3464 struct regcache *regcache = get_thread_regcache (ecs->ptid);
3465 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3466 struct cleanup *old_chain = save_inferior_ptid ();
3467
3468 inferior_ptid = ecs->ptid;
3469
3470 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
3471 paddress (gdbarch, stop_pc));
3472 if (target_stopped_by_watchpoint ())
3473 {
3474 CORE_ADDR addr;
3475
3476 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
3477
3478 if (target_stopped_data_address (&current_target, &addr))
3479 fprintf_unfiltered (gdb_stdlog,
3480 "infrun: stopped data address = %s\n",
3481 paddress (gdbarch, addr));
3482 else
3483 fprintf_unfiltered (gdb_stdlog,
3484 "infrun: (no data address available)\n");
3485 }
3486
3487 do_cleanups (old_chain);
3488 }
3489
3490 if (stepping_past_singlestep_breakpoint)
3491 {
3492 gdb_assert (singlestep_breakpoints_inserted_p);
3493 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
3494 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
3495
3496 stepping_past_singlestep_breakpoint = 0;
3497
3498 /* We've either finished single-stepping past the single-step
3499 breakpoint, or stopped for some other reason. It would be nice if
3500 we could tell, but we can't reliably. */
3501 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3502 {
3503 if (debug_infrun)
3504 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
3505 /* Pull the single step breakpoints out of the target. */
3506 remove_single_step_breakpoints ();
3507 singlestep_breakpoints_inserted_p = 0;
3508
3509 ecs->random_signal = 0;
3510 ecs->event_thread->trap_expected = 0;
3511
3512 context_switch (saved_singlestep_ptid);
3513 if (deprecated_context_hook)
3514 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3515
3516 resume (1, TARGET_SIGNAL_0);
3517 prepare_to_wait (ecs);
3518 return;
3519 }
3520 }
3521
3522 if (!ptid_equal (deferred_step_ptid, null_ptid))
3523 {
3524 /* In non-stop mode, there's never a deferred_step_ptid set. */
3525 gdb_assert (!non_stop);
3526
3527 /* If we stopped for some other reason than single-stepping, ignore
3528 the fact that we were supposed to switch back. */
3529 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3530 {
3531 if (debug_infrun)
3532 fprintf_unfiltered (gdb_stdlog,
3533 "infrun: handling deferred step\n");
3534
3535 /* Pull the single step breakpoints out of the target. */
3536 if (singlestep_breakpoints_inserted_p)
3537 {
3538 remove_single_step_breakpoints ();
3539 singlestep_breakpoints_inserted_p = 0;
3540 }
3541
3542 /* Note: We do not call context_switch at this point, as the
3543 context is already set up for stepping the original thread. */
3544 switch_to_thread (deferred_step_ptid);
3545 deferred_step_ptid = null_ptid;
3546 /* Suppress spurious "Switching to ..." message. */
3547 previous_inferior_ptid = inferior_ptid;
3548
3549 resume (1, TARGET_SIGNAL_0);
3550 prepare_to_wait (ecs);
3551 return;
3552 }
3553
3554 deferred_step_ptid = null_ptid;
3555 }
3556
3557 /* See if a thread hit a thread-specific breakpoint that was meant for
3558 another thread. If so, then step that thread past the breakpoint,
3559 and continue it. */
3560
3561 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3562 {
3563 int thread_hop_needed = 0;
3564 struct address_space *aspace =
3565 get_regcache_aspace (get_thread_regcache (ecs->ptid));
3566
3567 /* Check if a regular breakpoint has been hit before checking
3568 for a potential single step breakpoint. Otherwise, GDB will
3569 not see this breakpoint hit when stepping onto breakpoints. */
3570 if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
3571 {
3572 ecs->random_signal = 0;
3573 if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
3574 thread_hop_needed = 1;
3575 }
3576 else if (singlestep_breakpoints_inserted_p)
3577 {
3578 /* We have not context switched yet, so this should be true
3579 no matter which thread hit the singlestep breakpoint. */
3580 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
3581 if (debug_infrun)
3582 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
3583 "trap for %s\n",
3584 target_pid_to_str (ecs->ptid));
3585
3586 ecs->random_signal = 0;
3587 /* The call to in_thread_list is necessary because PTIDs sometimes
3588 change when we go from single-threaded to multi-threaded. If
3589 the singlestep_ptid is still in the list, assume that it is
3590 really different from ecs->ptid. */
3591 if (!ptid_equal (singlestep_ptid, ecs->ptid)
3592 && in_thread_list (singlestep_ptid))
3593 {
3594 /* If the PC of the thread we were trying to single-step
3595 has changed, discard this event (which we were going
3596 to ignore anyway), and pretend we saw that thread
3597 trap. This prevents us continuously moving the
3598 single-step breakpoint forward, one instruction at a
3599 time. If the PC has changed, then the thread we were
3600 trying to single-step has trapped or been signalled,
3601 but the event has not been reported to GDB yet.
3602
3603 There might be some cases where this loses signal
3604 information, if a signal has arrived at exactly the
3605 same time that the PC changed, but this is the best
3606 we can do with the information available. Perhaps we
3607 should arrange to report all events for all threads
3608 when they stop, or to re-poll the remote looking for
3609 this particular thread (i.e. temporarily enable
3610 schedlock). */
3611
3612 CORE_ADDR new_singlestep_pc
3613 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
3614
3615 if (new_singlestep_pc != singlestep_pc)
3616 {
3617 enum target_signal stop_signal;
3618
3619 if (debug_infrun)
3620 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
3621 " but expected thread advanced also\n");
3622
3623 /* The current context still belongs to
3624 singlestep_ptid. Don't swap here, since that's
3625 the context we want to use. Just fudge our
3626 state and continue. */
3627 stop_signal = ecs->event_thread->stop_signal;
3628 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3629 ecs->ptid = singlestep_ptid;
3630 ecs->event_thread = find_thread_ptid (ecs->ptid);
3631 ecs->event_thread->stop_signal = stop_signal;
3632 stop_pc = new_singlestep_pc;
3633 }
3634 else
3635 {
3636 if (debug_infrun)
3637 fprintf_unfiltered (gdb_stdlog,
3638 "infrun: unexpected thread\n");
3639
3640 thread_hop_needed = 1;
3641 stepping_past_singlestep_breakpoint = 1;
3642 saved_singlestep_ptid = singlestep_ptid;
3643 }
3644 }
3645 }
3646
3647 if (thread_hop_needed)
3648 {
3649 struct regcache *thread_regcache;
3650 int remove_status = 0;
3651
3652 if (debug_infrun)
3653 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
3654
3655 /* Switch context before touching inferior memory, the
3656 previous thread may have exited. */
3657 if (!ptid_equal (inferior_ptid, ecs->ptid))
3658 context_switch (ecs->ptid);
3659
3660 /* Saw a breakpoint, but it was hit by the wrong thread.
3661 Just continue. */
3662
3663 if (singlestep_breakpoints_inserted_p)
3664 {
3665 /* Pull the single step breakpoints out of the target. */
3666 remove_single_step_breakpoints ();
3667 singlestep_breakpoints_inserted_p = 0;
3668 }
3669
3670 /* If the arch can displace step, don't remove the
3671 breakpoints. */
3672 thread_regcache = get_thread_regcache (ecs->ptid);
3673 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
3674 remove_status = remove_breakpoints ();
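 /* Illustrative note (editorial): use_displaced_stepping above reflects
    the user-level "set displaced-stepping on|off|auto" setting; when
    displaced stepping is in use the breakpoints can stay inserted while
    this thread is stepped out of line, so other threads never see a
    window with the breakpoint missing.  */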
3675
3676 /* Did we fail to remove breakpoints? If so, we cannot
3677 step past the breakpoint in the wrong thread, so report
3678 an error. (There's at least one situation in which we
3679 can fail to remove the bp's: On HP-UX's that use ttrace,
3680 we can't change the address space of a vforking child
3681 process until the child exits (well, okay, not then
3682 either :-) or execs.) */
3683 if (remove_status != 0)
3684 error (_("Cannot step over breakpoint hit in wrong thread"));
3685 else
3686 { /* Single step */
3687 if (!non_stop)
3688 {
3689 /* Only need to require the next event from this
3690 thread in all-stop mode. */
3691 waiton_ptid = ecs->ptid;
3692 infwait_state = infwait_thread_hop_state;
3693 }
3694
3695 ecs->event_thread->stepping_over_breakpoint = 1;
3696 keep_going (ecs);
3697 return;
3698 }
3699 }
3700 else if (singlestep_breakpoints_inserted_p)
3701 {
3702 sw_single_step_trap_p = 1;
3703 ecs->random_signal = 0;
3704 }
3705 }
3706 else
3707 ecs->random_signal = 1;
3708
3709 /* See if something interesting happened to the non-current thread. If
3710 so, then switch to that thread. */
3711 if (!ptid_equal (ecs->ptid, inferior_ptid))
3712 {
3713 if (debug_infrun)
3714 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3715
3716 context_switch (ecs->ptid);
3717
3718 if (deprecated_context_hook)
3719 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3720 }
3721
3722 /* At this point, get hold of the now-current thread's frame. */
3723 frame = get_current_frame ();
3724 gdbarch = get_frame_arch (frame);
3725
3726 if (singlestep_breakpoints_inserted_p)
3727 {
3728 /* Pull the single step breakpoints out of the target. */
3729 remove_single_step_breakpoints ();
3730 singlestep_breakpoints_inserted_p = 0;
3731 }
3732
3733 if (stepped_after_stopped_by_watchpoint)
3734 stopped_by_watchpoint = 0;
3735 else
3736 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3737
3738 /* If necessary, step over this watchpoint. We'll be back to display
3739 it in a moment. */
3740 if (stopped_by_watchpoint
3741 && (target_have_steppable_watchpoint
3742 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3743 {
3744 /* At this point, we are stopped at an instruction which has
3745 attempted to write to a piece of memory under control of
3746 a watchpoint. The instruction hasn't actually executed
3747 yet. If we were to evaluate the watchpoint expression
3748 now, we would get the old value, and therefore no change
3749 would seem to have occurred.
3750
3751 In order to make watchpoints work `right', we really need
3752 to complete the memory write, and then evaluate the
3753 watchpoint expression. We do this by single-stepping the
3754 target.
3755
3756 It may not be necessary to disable the watchpoint to step over
3757 it. For example, the PA can (with some kernel cooperation)
3758 single step over a watchpoint without disabling the watchpoint.
3759
3760 It is far more common to need to disable a watchpoint to step
3761 the inferior over it. If we have non-steppable watchpoints,
3762 we must disable the current watchpoint; it's simplest to
3763 disable all watchpoints and breakpoints. */
3764 int hw_step = 1;
3765
3766 if (!target_have_steppable_watchpoint)
3767 remove_breakpoints ();
3768 /* Single step */
3769 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3770 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3771 waiton_ptid = ecs->ptid;
3772 if (target_have_steppable_watchpoint)
3773 infwait_state = infwait_step_watch_state;
3774 else
3775 infwait_state = infwait_nonstep_watch_state;
3776 prepare_to_wait (ecs);
3777 return;
3778 }
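 /* Illustrative sketch (editorial; assumes an x86-64 target for the
    example): with a non-steppable watchpoint, a stop for

        (gdb) watch global

    arrives with the PC still at the store instruction, e.g.
    "mov %eax, global(%rip)", before the write has happened.  Evaluating
    the watchpoint now would read the old value, so the code above
    removes breakpoints if needed, single-steps the thread past the
    store, and only then (via infwait_step_watch_state or
    infwait_nonstep_watch_state) re-checks the watchpoint.  */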
3779
3780 ecs->stop_func_start = 0;
3781 ecs->stop_func_end = 0;
3782 ecs->stop_func_name = 0;
3783 /* Don't care about return value; stop_func_start and stop_func_name
3784 will both be 0 if it doesn't work. */
3785 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3786 &ecs->stop_func_start, &ecs->stop_func_end);
3787 ecs->stop_func_start
3788 += gdbarch_deprecated_function_start_offset (gdbarch);
3789 ecs->event_thread->stepping_over_breakpoint = 0;
3790 bpstat_clear (&ecs->event_thread->stop_bpstat);
3791 ecs->event_thread->stop_step = 0;
3792 stop_print_frame = 1;
3793 ecs->random_signal = 0;
3794 stopped_by_random_signal = 0;
3795
3796 /* Hide inlined functions starting here, unless we just performed stepi or
3797 nexti. After stepi and nexti, always show the innermost frame (not any
3798 inline function call sites). */
3799 if (ecs->event_thread->step_range_end != 1)
3800 skip_inline_frames (ecs->ptid);
3801
3802 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3803 && ecs->event_thread->trap_expected
3804 && gdbarch_single_step_through_delay_p (gdbarch)
3805 && currently_stepping (ecs->event_thread))
3806 {
3807 /* We're trying to step off a breakpoint. Turns out that we're
3808 also on an instruction that needs to be stepped multiple
3809 times before it's been fully executed. E.g., architectures
3810 with a delay slot. It needs to be stepped twice, once for
3811 the instruction and once for the delay slot. */
3812 int step_through_delay
3813 = gdbarch_single_step_through_delay (gdbarch, frame);
3814
3815 if (debug_infrun && step_through_delay)
3816 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3817 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3818 {
3819 /* The user issued a continue when stopped at a breakpoint.
3820 Set up for another trap and get out of here. */
3821 ecs->event_thread->stepping_over_breakpoint = 1;
3822 keep_going (ecs);
3823 return;
3824 }
3825 else if (step_through_delay)
3826 {
3827 /* The user issued a step when stopped at a breakpoint.
3828 Maybe we should stop, maybe we should not - the delay
3829 slot *might* correspond to a line of source. In any
3830 case, don't decide that here, just set
3831 ecs->stepping_over_breakpoint, making sure we
3832 single-step again before breakpoints are re-inserted. */
3833 ecs->event_thread->stepping_over_breakpoint = 1;
3834 }
3835 }
3836
3837 /* Look at the cause of the stop, and decide what to do.
3838 The alternatives are:
3839 1) stop_stepping and return; to really stop and return to the debugger,
3840 2) keep_going and return to start up again
3841 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3842 3) set ecs->random_signal to 1, and the decision between 1 and 2
3843 will be made according to the signal handling tables. */
3844
3845 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3846 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3847 || stop_soon == STOP_QUIETLY_REMOTE)
3848 {
3849 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3850 {
3851 if (debug_infrun)
3852 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3853 stop_print_frame = 0;
3854 stop_stepping (ecs);
3855 return;
3856 }
3857
3858 /* This originates from start_remote(), start_inferior() and
3859 the shared library hook functions. */
3860 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3861 {
3862 if (debug_infrun)
3863 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3864 stop_stepping (ecs);
3865 return;
3866 }
3867
3868 /* This originates from attach_command(). We need to overwrite
3869 the stop_signal here, because some kernels don't ignore a
3870 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3871 See more comments in inferior.h. On the other hand, if we
3872 get a non-SIGSTOP, report it to the user - assume the backend
3873 will handle the SIGSTOP if it should show up later.
3874
3875 Also consider that the attach is complete when we see a
3876 SIGTRAP. Some systems (e.g. Windows) and stubs supporting
3877 target extended-remote (e.g. gdbserver) report it instead
3878 of a SIGSTOP. We already rely on SIGTRAP being our
3879 signal, so this is no exception.
3880
3881 Also consider that the attach is complete when we see a
3882 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3883 the target to stop all threads of the inferior, in case the
3884 low level attach operation doesn't stop them implicitly. If
3885 they weren't stopped implicitly, then the stub will report a
3886 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3887 other than GDB's request. */
3888 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3889 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3890 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3891 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3892 {
3893 stop_stepping (ecs);
3894 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3895 return;
3896 }
3897
3898 /* See if there is a breakpoint at the current PC. */
3899 ecs->event_thread->stop_bpstat
3900 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
3901 stop_pc, ecs->ptid);
3902
3903 /* The following is in case a breakpoint condition called
3904 a function. */
3905 stop_print_frame = 1;
3906
3907 /* This is where we handle "moribund" watchpoints. Unlike
3908 software breakpoint traps, hardware watchpoint traps are
3909 always distinguishable from random traps. If no high-level
3910 watchpoint is associated with the reported stop data address
3911 anymore, then the bpstat does not explain the signal ---
3912 simply make sure to ignore it if `stopped_by_watchpoint' is
3913 set. */
3914
3915 if (debug_infrun
3916 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3917 && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3918 && stopped_by_watchpoint)
3919 fprintf_unfiltered (gdb_stdlog, "\
3920 infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");
3921
3922 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3923 at one stage in the past included checks for an inferior
3924 function call's call dummy's return breakpoint. The original
3925 comment, that went with the test, read:
3926
3927 ``End of a stack dummy. Some systems (e.g. Sony news) give
3928 another signal besides SIGTRAP, so check here as well as
3929 above.''
3930
3931 If someone ever tries to get call dummies on a
3932 non-executable stack to work (where the target would stop
3933 with something like a SIGSEGV), then those tests might need
3934 to be re-instated. Given, however, that the tests were only
3935 enabled when momentary breakpoints were not being used, I
3936 suspect that it won't be the case.
3937
3938 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3939 be necessary for call dummies on a non-executable stack on
3940 SPARC. */
3941
3942 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3943 ecs->random_signal
3944 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3945 || stopped_by_watchpoint
3946 || ecs->event_thread->trap_expected
3947 || (ecs->event_thread->step_range_end
3948 && ecs->event_thread->step_resume_breakpoint == NULL));
3949 else
3950 {
3951 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3952 if (!ecs->random_signal)
3953 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3954 }
3955 }
3956
3957 /* When we reach this point, we've pretty much decided
3958 that the reason for stopping must've been a random
3959 (unexpected) signal. */
3960
3961 else
3962 ecs->random_signal = 1;
3963
3964 process_event_stop_test:
3965
3966 /* Re-fetch current thread's frame in case we did a
3967 "goto process_event_stop_test" above. */
3968 frame = get_current_frame ();
3969 gdbarch = get_frame_arch (frame);
3970
3971 /* For the program's own signals, act according to
3972 the signal handling tables. */
3973
3974 if (ecs->random_signal)
3975 {
3976 /* Signal not for debugging purposes. */
3977 int printed = 0;
3978 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
3979
3980 if (debug_infrun)
3981 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3982 ecs->event_thread->stop_signal);
3983
3984 stopped_by_random_signal = 1;
3985
3986 if (signal_print[ecs->event_thread->stop_signal])
3987 {
3988 printed = 1;
3989 target_terminal_ours_for_output ();
3990 print_signal_received_reason (ecs->event_thread->stop_signal);
3991 }
3992 /* Always stop on signals if we're either just gaining control
3993 of the program, or the user explicitly requested this thread
3994 to remain stopped. */
3995 if (stop_soon != NO_STOP_QUIETLY
3996 || ecs->event_thread->stop_requested
3997 || (!inf->detaching
3998 && signal_stop_state (ecs->event_thread->stop_signal)))
3999 {
4000 stop_stepping (ecs);
4001 return;
4002 }
4003 /* If not going to stop, give terminal back
4004 if we took it away. */
4005 else if (printed)
4006 target_terminal_inferior ();
4007
4008 /* Clear the signal if it should not be passed. */
4009 if (signal_program[ecs->event_thread->stop_signal] == 0)
4010 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
4011
4012 if (ecs->event_thread->prev_pc == stop_pc
4013 && ecs->event_thread->trap_expected
4014 && ecs->event_thread->step_resume_breakpoint == NULL)
4015 {
4016 /* We were just starting a new sequence, attempting to
4017 single-step off of a breakpoint and expecting a SIGTRAP.
4018 Instead this signal arrives. This signal will take us out
4019 of the stepping range so GDB needs to remember to, when
4020 the signal handler returns, resume stepping off that
4021 breakpoint. */
4022 /* To simplify things, "continue" is forced to use the same
4023 code paths as single-step - set a breakpoint at the
4024 signal return address and then, once hit, step off that
4025 breakpoint. */
4026 if (debug_infrun)
4027 fprintf_unfiltered (gdb_stdlog,
4028 "infrun: signal arrived while stepping over "
4029 "breakpoint\n");
4030
4031 insert_step_resume_breakpoint_at_frame (frame);
4032 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4033 keep_going (ecs);
4034 return;
4035 }
4036
4037 if (ecs->event_thread->step_range_end != 0
4038 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
4039 && (ecs->event_thread->step_range_start <= stop_pc
4040 && stop_pc < ecs->event_thread->step_range_end)
4041 && frame_id_eq (get_stack_frame_id (frame),
4042 ecs->event_thread->step_stack_frame_id)
4043 && ecs->event_thread->step_resume_breakpoint == NULL)
4044 {
4045 /* The inferior is about to take a signal that will take it
4046 out of the single step range. Set a breakpoint at the
4047 current PC (which is presumably where the signal handler
4048 will eventually return) and then allow the inferior to
4049 run free.
4050
4051 Note that this is only needed for a signal delivered
4052 while in the single-step range. Nested signals aren't a
4053 problem as they eventually all return. */
4054 if (debug_infrun)
4055 fprintf_unfiltered (gdb_stdlog,
4056 "infrun: signal may take us out of "
4057 "single-step range\n");
4058
4059 insert_step_resume_breakpoint_at_frame (frame);
4060 keep_going (ecs);
4061 return;
4062 }
4063
4064 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4065 when either there's a nested signal, or when there's a
4066 pending signal enabled just as the signal handler returns
4067 (leaving the inferior at the step-resume-breakpoint without
4068 actually executing it). Either way continue until the
4069 breakpoint is really hit. */
4070 keep_going (ecs);
4071 return;
4072 }
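 /* Illustrative note (editorial): the per-signal tables consulted in the
    random-signal path above (signal_stop_state, signal_print,
    signal_program) are the Stop/Print/Pass flags the user edits with the
    "handle" command, e.g.:

        (gdb) handle SIGUSR1 nostop noprint pass
        (gdb) handle SIGSEGV stop print nopass  */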
4073
4074 /* Handle cases caused by hitting a breakpoint. */
4075 {
4076 CORE_ADDR jmp_buf_pc;
4077 struct bpstat_what what;
4078
4079 what = bpstat_what (ecs->event_thread->stop_bpstat);
4080
4081 if (what.call_dummy)
4082 {
4083 stop_stack_dummy = what.call_dummy;
4084 }
4085
4086 /* If we hit an internal event that triggers symbol changes, the
4087 current frame will be invalidated within bpstat_what (e.g., if
4088 we hit an internal solib event). Re-fetch it. */
4089 frame = get_current_frame ();
4090 gdbarch = get_frame_arch (frame);
4091
4092 switch (what.main_action)
4093 {
4094 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4095 /* If we hit the breakpoint at longjmp while stepping, we
4096 install a momentary breakpoint at the target of the
4097 jmp_buf. */
4098
4099 if (debug_infrun)
4100 fprintf_unfiltered (gdb_stdlog,
4101 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4102
4103 ecs->event_thread->stepping_over_breakpoint = 1;
4104
4105 if (!gdbarch_get_longjmp_target_p (gdbarch)
4106 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
4107 {
4108 if (debug_infrun)
4109 fprintf_unfiltered (gdb_stdlog, "\
4110 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
4111 keep_going (ecs);
4112 return;
4113 }
4114
4115 /* We're going to replace the current step-resume breakpoint
4116 with a longjmp-resume breakpoint. */
4117 delete_step_resume_breakpoint (ecs->event_thread);
4118
4119 /* Insert a breakpoint at resume address. */
4120 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4121
4122 keep_going (ecs);
4123 return;
4124
4125 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4126 if (debug_infrun)
4127 fprintf_unfiltered (gdb_stdlog,
4128 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4129
4130 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
4131 delete_step_resume_breakpoint (ecs->event_thread);
4132
4133 ecs->event_thread->stop_step = 1;
4134 print_end_stepping_range_reason ();
4135 stop_stepping (ecs);
4136 return;
4137
4138 case BPSTAT_WHAT_SINGLE:
4139 if (debug_infrun)
4140 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4141 ecs->event_thread->stepping_over_breakpoint = 1;
4142 /* Still need to check other stuff, at least the case
4143 where we are stepping and step out of the right range. */
4144 break;
4145
4146 case BPSTAT_WHAT_STOP_NOISY:
4147 if (debug_infrun)
4148 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4149 stop_print_frame = 1;
4150
4151 /* We are about to nuke the step_resume_breakpoint via the
4152 cleanup chain, so no need to worry about it here. */
4153
4154 stop_stepping (ecs);
4155 return;
4156
4157 case BPSTAT_WHAT_STOP_SILENT:
4158 if (debug_infrun)
4159 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4160 stop_print_frame = 0;
4161
4162 /* We are about to nuke the step_resume_breakpoint via the
4163 cleanup chain, so no need to worry about it here. */
4164
4165 stop_stepping (ecs);
4166 return;
4167
4168 case BPSTAT_WHAT_STEP_RESUME:
4169 if (debug_infrun)
4170 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4171
4172 delete_step_resume_breakpoint (ecs->event_thread);
4173 if (ecs->event_thread->step_after_step_resume_breakpoint)
4174 {
4175 /* Back when the step-resume breakpoint was inserted, we
4176 were trying to single-step off a breakpoint. Go back
4177 to doing that. */
4178 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4179 ecs->event_thread->stepping_over_breakpoint = 1;
4180 keep_going (ecs);
4181 return;
4182 }
4183 if (stop_pc == ecs->stop_func_start
4184 && execution_direction == EXEC_REVERSE)
4185 {
4186 /* We are stepping over a function call in reverse, and
4187 just hit the step-resume breakpoint at the start
4188 address of the function. Go back to single-stepping,
4189 which should take us back to the function call. */
4190 ecs->event_thread->stepping_over_breakpoint = 1;
4191 keep_going (ecs);
4192 return;
4193 }
4194 break;
4195
4196 case BPSTAT_WHAT_KEEP_CHECKING:
4197 break;
4198 }
4199 }
4200
4201 /* We come here if we hit a breakpoint but should not
4202 stop for it. Possibly we also were stepping
4203 and should stop for that. So fall through and
4204 test for stepping. But, if not stepping,
4205 do not stop. */
4206
4207 /* In all-stop mode, if we're currently stepping but have stopped in
4208 some other thread, we need to switch back to the stepped thread. */
4209 if (!non_stop)
4210 {
4211 struct thread_info *tp;
4212
4213 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4214 ecs->event_thread);
4215 if (tp)
4216 {
4217 /* However, if the current thread is blocked on some internal
4218 breakpoint, and we simply need to step over that breakpoint
4219 to get it going again, do that first. */
4220 if ((ecs->event_thread->trap_expected
4221 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4222 || ecs->event_thread->stepping_over_breakpoint)
4223 {
4224 keep_going (ecs);
4225 return;
4226 }
4227
4228 /* If the stepping thread exited, then don't try to switch
4229 back and resume it, which could fail in several different
4230 ways depending on the target. Instead, just keep going.
4231
4232 We can find a stepping dead thread in the thread list in
4233 two cases:
4234
4235 - The target supports thread exit events, and when the
4236 target tries to delete the thread from the thread list,
4237 inferior_ptid pointed at the exiting thread. In such
4238 case, calling delete_thread does not really remove the
4239 thread from the list; instead, the thread is left listed,
4240 with 'exited' state.
4241
4242 - The target's debug interface does not support thread
4243 exit events, and so we have no idea whatsoever if the
4244 previously stepping thread is still alive. For that
4245 reason, we need to synchronously query the target
4246 now. */
4247 if (is_exited (tp->ptid)
4248 || !target_thread_alive (tp->ptid))
4249 {
4250 if (debug_infrun)
4251 fprintf_unfiltered (gdb_stdlog, "\
4252 infrun: not switching back to stepped thread, it has vanished\n");
4253
4254 delete_thread (tp->ptid);
4255 keep_going (ecs);
4256 return;
4257 }
4258
4259 /* Otherwise, we no longer expect a trap in the current thread.
4260 Clear the trap_expected flag before switching back -- this is
4261 what keep_going would do as well, if we called it. */
4262 ecs->event_thread->trap_expected = 0;
4263
4264 if (debug_infrun)
4265 fprintf_unfiltered (gdb_stdlog,
4266 "infrun: switching back to stepped thread\n");
4267
4268 ecs->event_thread = tp;
4269 ecs->ptid = tp->ptid;
4270 context_switch (ecs->ptid);
4271 keep_going (ecs);
4272 return;
4273 }
4274 }
4275
4276 /* Are we stepping to get the inferior out of the dynamic linker's
4277 hook (and possibly the dld itself) after catching a shlib
4278 event? */
4279 if (ecs->event_thread->stepping_through_solib_after_catch)
4280 {
4281 #if defined(SOLIB_ADD)
4282 /* Have we reached our destination? If not, keep going. */
4283 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4284 {
4285 if (debug_infrun)
4286 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4287 ecs->event_thread->stepping_over_breakpoint = 1;
4288 keep_going (ecs);
4289 return;
4290 }
4291 #endif
4292 if (debug_infrun)
4293 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4294 /* Else, stop and report the catchpoint(s) whose triggering
4295 caused us to begin stepping. */
4296 ecs->event_thread->stepping_through_solib_after_catch = 0;
4297 bpstat_clear (&ecs->event_thread->stop_bpstat);
4298 ecs->event_thread->stop_bpstat
4299 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4300 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4301 stop_print_frame = 1;
4302 stop_stepping (ecs);
4303 return;
4304 }
4305
4306 if (ecs->event_thread->step_resume_breakpoint)
4307 {
4308 if (debug_infrun)
4309 fprintf_unfiltered (gdb_stdlog,
4310 "infrun: step-resume breakpoint is inserted\n");
4311
4312 /* Having a step-resume breakpoint overrides anything
4313 else having to do with stepping commands until
4314 that breakpoint is reached. */
4315 keep_going (ecs);
4316 return;
4317 }
4318
4319 if (ecs->event_thread->step_range_end == 0)
4320 {
4321 if (debug_infrun)
4322 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4323 /* Likewise if we aren't even stepping. */
4324 keep_going (ecs);
4325 return;
4326 }
4327
4328 /* Re-fetch current thread's frame in case the code above caused
4329 the frame cache to be re-initialized, making our FRAME variable
4330 a dangling pointer. */
4331 frame = get_current_frame ();
4332 gdbarch = get_frame_arch (frame);
4333
4334 /* If stepping through a line, keep going if still within it.
4335
4336 Note that step_range_end is the address of the first instruction
4337 beyond the step range, and NOT the address of the last instruction
4338 within it!
4339
4340 Note also that during reverse execution, we may be stepping
4341 through a function epilogue and therefore must detect when
4342 the current-frame changes in the middle of a line. */
4343
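 /* Illustrative example (editorial): the range is half-open.  If a
    source line compiles to four 4-byte instructions at 0x1000, 0x1004,
    0x1008 and 0x100c, then step_range_start == 0x1000 and
    step_range_end == 0x1010, so a stop at 0x100c still satisfies
    "stop_pc < step_range_end" below and we keep stepping within the
    line.  */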
4344 if (stop_pc >= ecs->event_thread->step_range_start
4345 && stop_pc < ecs->event_thread->step_range_end
4346 && (execution_direction != EXEC_REVERSE
4347 || frame_id_eq (get_frame_id (frame),
4348 ecs->event_thread->step_frame_id)))
4349 {
4350 if (debug_infrun)
4351 fprintf_unfiltered
4352 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4353 paddress (gdbarch, ecs->event_thread->step_range_start),
4354 paddress (gdbarch, ecs->event_thread->step_range_end));
4355
4356 /* When stepping backward, stop at beginning of line range
4357 (unless it's the function entry point, in which case
4358 keep going back to the call point). */
4359 if (stop_pc == ecs->event_thread->step_range_start
4360 && stop_pc != ecs->stop_func_start
4361 && execution_direction == EXEC_REVERSE)
4362 {
4363 ecs->event_thread->stop_step = 1;
4364 print_end_stepping_range_reason ();
4365 stop_stepping (ecs);
4366 }
4367 else
4368 keep_going (ecs);
4369
4370 return;
4371 }
4372
4373 /* We stepped out of the stepping range. */
4374
4375 /* If we are stepping at the source level and entered the runtime
4376 loader dynamic symbol resolution code...
4377
4378 EXEC_FORWARD: we keep on single stepping until we exit the run
4379 time loader code and reach the callee's address.
4380
4381 EXEC_REVERSE: we've already executed the callee (backward), and
4382 the runtime loader code is handled just like any other
4383 undebuggable function call. Now we need only keep stepping
4384 backward through the trampoline code, and that's handled further
4385 down, so there is nothing for us to do here. */
4386
4387 if (execution_direction != EXEC_REVERSE
4388 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4389 && in_solib_dynsym_resolve_code (stop_pc))
4390 {
4391 CORE_ADDR pc_after_resolver =
4392 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4393
4394 if (debug_infrun)
4395 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4396
4397 if (pc_after_resolver)
4398 {
4399 /* Set up a step-resume breakpoint at the address
4400 indicated by SKIP_SOLIB_RESOLVER. */
4401 struct symtab_and_line sr_sal;
4402
4403 init_sal (&sr_sal);
4404 sr_sal.pc = pc_after_resolver;
4405 sr_sal.pspace = get_frame_program_space (frame);
4406
4407 insert_step_resume_breakpoint_at_sal (gdbarch,
4408 sr_sal, null_frame_id);
4409 }
4410
4411 keep_going (ecs);
4412 return;
4413 }
4414
4415 if (ecs->event_thread->step_range_end != 1
4416 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4417 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4418 && get_frame_type (frame) == SIGTRAMP_FRAME)
4419 {
4420 if (debug_infrun)
4421 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4422 /* The inferior, while doing a "step" or "next", has ended up in
4423 a signal trampoline (either by a signal being delivered or by
4424 the signal handler returning). Just single-step until the
4425 inferior leaves the trampoline (either by calling the handler
4426 or returning). */
4427 keep_going (ecs);
4428 return;
4429 }
4430
4431 /* Check for subroutine calls. The check for the current frame
4432 equalling the step ID is not necessary - the check of the
4433 previous frame's ID is sufficient - but it is a common case and
4434 cheaper than checking the previous frame's ID.
4435
4436 NOTE: frame_id_eq will never report two invalid frame IDs as
4437 being equal, so to get into this block, both the current and
4438 previous frame must have valid frame IDs. */
4439 /* The outer_frame_id check is a heuristic to detect stepping
4440 through startup code. If we step over an instruction which
4441 sets the stack pointer from an invalid value to a valid value,
4442 we may detect that as a subroutine call from the mythical
4443 "outermost" function. This could be fixed by marking
4444 outermost frames as !stack_p,code_p,special_p. Then the
4445 initial outermost frame, before sp was valid, would
4446 have code_addr == &_start. See the comment in frame_id_eq
4447 for more. */
4448 if (!frame_id_eq (get_stack_frame_id (frame),
4449 ecs->event_thread->step_stack_frame_id)
4450 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4451 ecs->event_thread->step_stack_frame_id)
4452 && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4453 outer_frame_id)
4454 || step_start_function != find_pc_function (stop_pc))))
4455 {
4456 CORE_ADDR real_stop_pc;
4457
4458 if (debug_infrun)
4459 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4460
4461 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4462 || ((ecs->event_thread->step_range_end == 1)
4463 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4464 ecs->stop_func_start)))
4465 {
4466 /* I presume that step_over_calls is only 0 when we're
4467 supposed to be stepping at the assembly language level
4468 ("stepi"). Just stop. */
4469 /* Also, maybe we just did a "nexti" inside a prologue, so we
4470 thought it was a subroutine call but it was not. Stop as
4471 well. FENN */
4472 /* And this works the same backward as frontward. MVS */
4473 ecs->event_thread->stop_step = 1;
4474 print_end_stepping_range_reason ();
4475 stop_stepping (ecs);
4476 return;
4477 }
4478
4479 /* Reverse stepping through solib trampolines. */
4480
4481 if (execution_direction == EXEC_REVERSE
4482 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4483 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4484 || (ecs->stop_func_start == 0
4485 && in_solib_dynsym_resolve_code (stop_pc))))
4486 {
4487 /* Any solib trampoline code can be handled in reverse
4488 by simply continuing to single-step. We have already
4489 executed the solib function (backwards), and a few
4490 steps will take us back through the trampoline to the
4491 caller. */
4492 keep_going (ecs);
4493 return;
4494 }
4495
4496 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4497 {
4498 /* We're doing a "next".
4499
4500 Normal (forward) execution: set a breakpoint at the
4501 callee's return address (the address at which the caller
4502 will resume).
4503
4504 Reverse (backward) execution: set the step-resume
4505 breakpoint at the start of the function that we just
4506 stepped into (backwards), and continue to there. When we
4507 get there, we'll need to single-step back to the caller. */
4508
4509 if (execution_direction == EXEC_REVERSE)
4510 {
4511 struct symtab_and_line sr_sal;
4512
4513 /* Normal function call return (static or dynamic). */
4514 init_sal (&sr_sal);
4515 sr_sal.pc = ecs->stop_func_start;
4516 sr_sal.pspace = get_frame_program_space (frame);
4517 insert_step_resume_breakpoint_at_sal (gdbarch,
4518 sr_sal, null_frame_id);
4519 }
4520 else
4521 insert_step_resume_breakpoint_at_caller (frame);
4522
4523 keep_going (ecs);
4524 return;
4525 }
4526
4527 /* If we are in a function call trampoline (a stub between the
4528 calling routine and the real function), locate the real
4529 function. That's what tells us (a) whether we want to step
4530 into it at all, and (b) what prologue we want to run to the
4531 end of, if we do step into it. */
4532 real_stop_pc = skip_language_trampoline (frame, stop_pc);
4533 if (real_stop_pc == 0)
4534 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4535 if (real_stop_pc != 0)
4536 ecs->stop_func_start = real_stop_pc;
4537
4538 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4539 {
4540 struct symtab_and_line sr_sal;
4541
4542 init_sal (&sr_sal);
4543 sr_sal.pc = ecs->stop_func_start;
4544 sr_sal.pspace = get_frame_program_space (frame);
4545
4546 insert_step_resume_breakpoint_at_sal (gdbarch,
4547 sr_sal, null_frame_id);
4548 keep_going (ecs);
4549 return;
4550 }
4551
4552 /* If we have line number information for the function we are
4553 thinking of stepping into, step into it.
4554
4555 If there are several symtabs at that PC (e.g. with include
4556 files), we just want to know whether *any* of them have line
4557 numbers. find_pc_line handles this. */
4558 {
4559 struct symtab_and_line tmp_sal;
4560
4561 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4562 tmp_sal.pspace = get_frame_program_space (frame);
4563 if (tmp_sal.line != 0)
4564 {
4565 if (execution_direction == EXEC_REVERSE)
4566 handle_step_into_function_backward (gdbarch, ecs);
4567 else
4568 handle_step_into_function (gdbarch, ecs);
4569 return;
4570 }
4571 }
4572
4573 /* If we have no line number and the step-stop-if-no-debug is
4574 set, we stop the step so that the user has a chance to switch
4575 to assembly mode. */
4576 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4577 && step_stop_if_no_debug)
4578 {
4579 ecs->event_thread->stop_step = 1;
4580 print_end_stepping_range_reason ();
4581 stop_stepping (ecs);
4582 return;
4583 }
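 /* Illustrative note (editorial): step_stop_if_no_debug tested above is
    the flag behind the user-level command

        (gdb) set step-mode on

    (the same setting the undebuggable-function case further below refers
    to as "set step-mode").  */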
4584
4585 if (execution_direction == EXEC_REVERSE)
4586 {
4587 /* Set a breakpoint at callee's start address.
4588 From there we can step once and be back in the caller. */
4589 struct symtab_and_line sr_sal;
4590
4591 init_sal (&sr_sal);
4592 sr_sal.pc = ecs->stop_func_start;
4593 sr_sal.pspace = get_frame_program_space (frame);
4594 insert_step_resume_breakpoint_at_sal (gdbarch,
4595 sr_sal, null_frame_id);
4596 }
4597 else
4598 /* Set a breakpoint at callee's return address (the address
4599 at which the caller will resume). */
4600 insert_step_resume_breakpoint_at_caller (frame);
4601
4602 keep_going (ecs);
4603 return;
4604 }
4605
4606 /* Reverse stepping through solib trampolines. */
4607
4608 if (execution_direction == EXEC_REVERSE
4609 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4610 {
4611 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4612 || (ecs->stop_func_start == 0
4613 && in_solib_dynsym_resolve_code (stop_pc)))
4614 {
4615 /* Any solib trampoline code can be handled in reverse
4616 by simply continuing to single-step. We have already
4617 executed the solib function (backwards), and a few
4618 steps will take us back through the trampoline to the
4619 caller. */
4620 keep_going (ecs);
4621 return;
4622 }
4623 else if (in_solib_dynsym_resolve_code (stop_pc))
4624 {
4625 /* Stepped backward into the solib dynsym resolver.
4626 Set a breakpoint at its start and continue, then
4627 one more step will take us out. */
4628 struct symtab_and_line sr_sal;
4629
4630 init_sal (&sr_sal);
4631 sr_sal.pc = ecs->stop_func_start;
4632 sr_sal.pspace = get_frame_program_space (frame);
4633 insert_step_resume_breakpoint_at_sal (gdbarch,
4634 sr_sal, null_frame_id);
4635 keep_going (ecs);
4636 return;
4637 }
4638 }
4639
4640 /* If we're in the return path from a shared library trampoline,
4641 we want to proceed through the trampoline when stepping. */
4642 if (gdbarch_in_solib_return_trampoline (gdbarch,
4643 stop_pc, ecs->stop_func_name))
4644 {
4645 /* Determine where this trampoline returns. */
4646 CORE_ADDR real_stop_pc;
4647
4648 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4649
4650 if (debug_infrun)
4651 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4652
4653 /* Only proceed through if we know where it's going. */
4654 if (real_stop_pc)
4655 {
4656 /* And put the step-breakpoint there and go until there. */
4657 struct symtab_and_line sr_sal;
4658
4659 init_sal (&sr_sal); /* initialize to zeroes */
4660 sr_sal.pc = real_stop_pc;
4661 sr_sal.section = find_pc_overlay (sr_sal.pc);
4662 sr_sal.pspace = get_frame_program_space (frame);
4663
4664 /* Do not specify what the fp should be when we stop since
4665 on some machines the prologue is where the new fp value
4666 is established. */
4667 insert_step_resume_breakpoint_at_sal (gdbarch,
4668 sr_sal, null_frame_id);
4669
4670 /* Restart without fiddling with the step ranges or
4671 other state. */
4672 keep_going (ecs);
4673 return;
4674 }
4675 }
4676
4677 stop_pc_sal = find_pc_line (stop_pc, 0);
4678
4679 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4680 the trampoline processing logic; however, there are some trampolines
4681 that have no names, so we should do trampoline handling first. */
4682 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4683 && ecs->stop_func_name == NULL
4684 && stop_pc_sal.line == 0)
4685 {
4686 if (debug_infrun)
4687 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4688
4689 /* The inferior just stepped into, or returned to, an
4690 undebuggable function (where there is no debugging information
4691 and no line number corresponding to the address where the
4692 inferior stopped). Since we want to skip this kind of code,
4693 we keep going until the inferior returns from this
4694 function - unless the user has asked us not to (via
4695 set step-mode) or we no longer know how to get back
4696 to the call site. */
4697 if (step_stop_if_no_debug
4698 || !frame_id_p (frame_unwind_caller_id (frame)))
4699 {
4700 /* If we have no line number and the step-stop-if-no-debug
4701 is set, we stop the step so that the user has a chance to
4702 switch to assembly mode. */
4703 ecs->event_thread->stop_step = 1;
4704 print_end_stepping_range_reason ();
4705 stop_stepping (ecs);
4706 return;
4707 }
4708 else
4709 {
4710 /* Set a breakpoint at callee's return address (the address
4711 at which the caller will resume). */
4712 insert_step_resume_breakpoint_at_caller (frame);
4713 keep_going (ecs);
4714 return;
4715 }
4716 }
4717
4718 if (ecs->event_thread->step_range_end == 1)
4719 {
4720 /* It is stepi or nexti. We always want to stop stepping after
4721 one instruction. */
4722 if (debug_infrun)
4723 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4724 ecs->event_thread->stop_step = 1;
4725 print_end_stepping_range_reason ();
4726 stop_stepping (ecs);
4727 return;
4728 }
4729
4730 if (stop_pc_sal.line == 0)
4731 {
4732 /* We have no line number information. That means to stop
4733 stepping (does this always happen right after one instruction,
4734 when we do "s" in a function with no line numbers,
4735 or can this happen as a result of a return or longjmp?). */
4736 if (debug_infrun)
4737 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4738 ecs->event_thread->stop_step = 1;
4739 print_end_stepping_range_reason ();
4740 stop_stepping (ecs);
4741 return;
4742 }
4743
4744 /* Look for "calls" to inlined functions, part one. If the inline
4745 frame machinery detected some skipped call sites, we have entered
4746 a new inline function. */
4747
4748 if (frame_id_eq (get_frame_id (get_current_frame ()),
4749 ecs->event_thread->step_frame_id)
4750 && inline_skipped_frames (ecs->ptid))
4751 {
4752 struct symtab_and_line call_sal;
4753
4754 if (debug_infrun)
4755 fprintf_unfiltered (gdb_stdlog,
4756 "infrun: stepped into inlined function\n");
4757
4758 find_frame_sal (get_current_frame (), &call_sal);
4759
4760 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4761 {
4762 /* For "step", we're going to stop. But if the call site
4763 for this inlined function is on the same source line as
4764 we were previously stepping, go down into the function
4765 first. Otherwise stop at the call site. */
4766
4767 if (call_sal.line == ecs->event_thread->current_line
4768 && call_sal.symtab == ecs->event_thread->current_symtab)
4769 step_into_inline_frame (ecs->ptid);
4770
4771 ecs->event_thread->stop_step = 1;
4772 print_end_stepping_range_reason ();
4773 stop_stepping (ecs);
4774 return;
4775 }
4776 else
4777 {
4778 /* For "next", we should stop at the call site if it is on a
4779 different source line. Otherwise continue through the
4780 inlined function. */
4781 if (call_sal.line == ecs->event_thread->current_line
4782 && call_sal.symtab == ecs->event_thread->current_symtab)
4783 keep_going (ecs);
4784 else
4785 {
4786 ecs->event_thread->stop_step = 1;
4787 print_end_stepping_range_reason ();
4788 stop_stepping (ecs);
4789 }
4790 return;
4791 }
4792 }
4793
4794 /* Look for "calls" to inlined functions, part two. If we are still
4795 in the same real function we were stepping through, but we have
4796 to go further up to find the exact frame ID, we are stepping
4797 through a more inlined call beyond its call site. */
4798
4799 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4800 && !frame_id_eq (get_frame_id (get_current_frame ()),
4801 ecs->event_thread->step_frame_id)
4802 && stepped_in_from (get_current_frame (),
4803 ecs->event_thread->step_frame_id))
4804 {
4805 if (debug_infrun)
4806 fprintf_unfiltered (gdb_stdlog,
4807 "infrun: stepping through inlined function\n");
4808
4809 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4810 keep_going (ecs);
4811 else
4812 {
4813 ecs->event_thread->stop_step = 1;
4814 print_end_stepping_range_reason ();
4815 stop_stepping (ecs);
4816 }
4817 return;
4818 }
4819
4820 if ((stop_pc == stop_pc_sal.pc)
4821 && (ecs->event_thread->current_line != stop_pc_sal.line
4822 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4823 {
4824 /* We are at the start of a different line. So stop. Note that
4825 we don't stop if we step into the middle of a different line.
4826 That is said to make things like for (;;) statements work
4827 better. */
4828 if (debug_infrun)
4829 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4830 ecs->event_thread->stop_step = 1;
4831 print_end_stepping_range_reason ();
4832 stop_stepping (ecs);
4833 return;
4834 }
4835
4836 /* We aren't done stepping.
4837
4838 Optimize by setting the stepping range to the line.
4839 (We might not be in the original line, but if we entered a
4840 new line in mid-statement, we continue stepping. This makes
4841 things like for(;;) statements work better.) */
4842
4843 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4844 ecs->event_thread->step_range_end = stop_pc_sal.end;
4845 set_step_info (frame, stop_pc_sal);
4846
4847 if (debug_infrun)
4848 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4849 keep_going (ecs);
4850 }
4851
4852 /* Is thread TP in the middle of single-stepping? */
4853
4854 static int
4855 currently_stepping (struct thread_info *tp)
4856 {
4857 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4858 || tp->trap_expected
4859 || tp->stepping_through_solib_after_catch
4860 || bpstat_should_step ());
4861 }
4862
4863 /* Returns true if any thread *but* the one passed in "data" is in the
4864 middle of stepping or of handling a "next". */
4865
4866 static int
4867 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4868 {
4869 if (tp == data)
4870 return 0;
4871
4872 return (tp->step_range_end
4873 || tp->trap_expected
4874 || tp->stepping_through_solib_after_catch);
4875 }
4876
4877 /* Inferior has stepped into a subroutine call with source code that
4878 we should not step over. Step to the first line of code in
4879 it. */
4880
4881 static void
4882 handle_step_into_function (struct gdbarch *gdbarch,
4883 struct execution_control_state *ecs)
4884 {
4885 struct symtab *s;
4886 struct symtab_and_line stop_func_sal, sr_sal;
4887
4888 s = find_pc_symtab (stop_pc);
4889 if (s && s->language != language_asm)
4890 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4891 ecs->stop_func_start);
4892
4893 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4894 /* Use the step_resume_break to step until the end of the prologue,
4895 even if that involves jumps (as it seems to on the vax under
4896 4.2). */
4897 /* If the prologue ends in the middle of a source line, continue to
4898 the end of that source line (if it is still within the function).
4899 Otherwise, just go to end of prologue. */
4900 if (stop_func_sal.end
4901 && stop_func_sal.pc != ecs->stop_func_start
4902 && stop_func_sal.end < ecs->stop_func_end)
4903 ecs->stop_func_start = stop_func_sal.end;
4904
4905 /* Architectures which require breakpoint adjustment might not be able
4906 to place a breakpoint at the computed address. If so, the test
4907 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4908 ecs->stop_func_start to an address at which a breakpoint may be
4909 legitimately placed.
4910
4911 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4912 made, GDB will enter an infinite loop when stepping through
4913 optimized code consisting of VLIW instructions which contain
4914 subinstructions corresponding to different source lines. On
4915 FR-V, it's not permitted to place a breakpoint on any but the
4916 first subinstruction of a VLIW instruction. When a breakpoint is
4917 set, GDB will adjust the breakpoint address to the beginning of
4918 the VLIW instruction. Thus, we need to make the corresponding
4919 adjustment here when computing the stop address. */
4920
4921 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4922 {
4923 ecs->stop_func_start
4924 = gdbarch_adjust_breakpoint_address (gdbarch,
4925 ecs->stop_func_start);
4926 }
4927
4928 if (ecs->stop_func_start == stop_pc)
4929 {
4930 /* We are already there: stop now. */
4931 ecs->event_thread->stop_step = 1;
4932 print_end_stepping_range_reason ();
4933 stop_stepping (ecs);
4934 return;
4935 }
4936 else
4937 {
4938 /* Put the step-breakpoint there and go until there. */
4939 init_sal (&sr_sal); /* initialize to zeroes */
4940 sr_sal.pc = ecs->stop_func_start;
4941 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4942 sr_sal.pspace = get_frame_program_space (get_current_frame ());
4943
4944 /* Do not specify what the fp should be when we stop since on
4945 some machines the prologue is where the new fp value is
4946 established. */
4947 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4948
4949 /* And make sure stepping stops right away then. */
4950 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4951 }
4952 keep_going (ecs);
4953 }
4954
4955 /* Inferior has stepped backward into a subroutine call with source
4956 code that we should not step over.  Step to the beginning of the
4957 last line of code in it. */
4958
4959 static void
4960 handle_step_into_function_backward (struct gdbarch *gdbarch,
4961 struct execution_control_state *ecs)
4962 {
4963 struct symtab *s;
4964 struct symtab_and_line stop_func_sal;
4965
4966 s = find_pc_symtab (stop_pc);
4967 if (s && s->language != language_asm)
4968 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4969 ecs->stop_func_start);
4970
4971 stop_func_sal = find_pc_line (stop_pc, 0);
4972
4973 /* OK, we're just going to keep stepping here. */
4974 if (stop_func_sal.pc == stop_pc)
4975 {
4976 /* We're there already. Just stop stepping now. */
4977 ecs->event_thread->stop_step = 1;
4978 print_end_stepping_range_reason ();
4979 stop_stepping (ecs);
4980 }
4981 else
4982 {
4983 /* Else just reset the step range and keep going.
4984 No step-resume breakpoint, they don't work for
4985 epilogues, which can have multiple entry paths. */
4986 ecs->event_thread->step_range_start = stop_func_sal.pc;
4987 ecs->event_thread->step_range_end = stop_func_sal.end;
4988 keep_going (ecs);
4989 }
4990 return;
4991 }
4992
4993 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4994 This is used both to skip over functions and to skip over other code. */
4995
4996 static void
4997 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4998 struct symtab_and_line sr_sal,
4999 struct frame_id sr_id)
5000 {
5001 /* There should never be more than one step-resume or longjmp-resume
5002 breakpoint per thread, so we should never be setting a new
5003 step_resume_breakpoint when one is already active. */
5004 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5005
5006 if (debug_infrun)
5007 fprintf_unfiltered (gdb_stdlog,
5008 "infrun: inserting step-resume breakpoint at %s\n",
5009 paddress (gdbarch, sr_sal.pc));
5010
5011 inferior_thread ()->step_resume_breakpoint
5012 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
5013 }
5014
5015 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
5016 to skip a potential signal handler.
5017
5018 This is called with the interrupted function's frame. The signal
5019 handler, when it returns, will resume the interrupted function at
5020 RETURN_FRAME.pc. */
5021
5022 static void
5023 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5024 {
5025 struct symtab_and_line sr_sal;
5026 struct gdbarch *gdbarch;
5027
5028 gdb_assert (return_frame != NULL);
5029 init_sal (&sr_sal); /* initialize to zeros */
5030
5031 gdbarch = get_frame_arch (return_frame);
5032 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5033 sr_sal.section = find_pc_overlay (sr_sal.pc);
5034 sr_sal.pspace = get_frame_program_space (return_frame);
5035
5036 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5037 get_stack_frame_id (return_frame));
5038 }
5039
5040 /* Similar to insert_step_resume_breakpoint_at_frame, except that it
5041 sets the breakpoint at the previous frame's PC.  This is used to
5042 skip a function after stepping into it (for "next" or if the called
5043 function has no debugging information).
5044
5045 The current function has almost always been reached by single
5046 stepping a call or return instruction. NEXT_FRAME belongs to the
5047 current function, and the breakpoint will be set at the caller's
5048 resume address.
5049
5050 This is a separate function rather than reusing
5051 insert_step_resume_breakpoint_at_frame in order to avoid
5052 get_prev_frame, which may stop prematurely (see the implementation
5053 of frame_unwind_caller_id for an example). */
5054
5055 static void
5056 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5057 {
5058 struct symtab_and_line sr_sal;
5059 struct gdbarch *gdbarch;
5060
5061 /* We shouldn't have gotten here if we don't know where the call site
5062 is. */
5063 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5064
5065 init_sal (&sr_sal); /* initialize to zeros */
5066
5067 gdbarch = frame_unwind_caller_arch (next_frame);
5068 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5069 frame_unwind_caller_pc (next_frame));
5070 sr_sal.section = find_pc_overlay (sr_sal.pc);
5071 sr_sal.pspace = frame_unwind_program_space (next_frame);
5072
5073 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5074 frame_unwind_caller_id (next_frame));
5075 }
5076
5077 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
5078 new breakpoint at the target of a jmp_buf. The handling of
5079 longjmp-resume uses the same mechanisms used for handling
5080 "step-resume" breakpoints. */
5081
5082 static void
5083 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5084 {
5085 /* There should never be more than one step-resume or longjmp-resume
5086 breakpoint per thread, so we should never be setting a new
5087 longjmp_resume_breakpoint when one is already active. */
5088 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5089
5090 if (debug_infrun)
5091 fprintf_unfiltered (gdb_stdlog,
5092 "infrun: inserting longjmp-resume breakpoint at %s\n",
5093 paddress (gdbarch, pc));
5094
5095 inferior_thread ()->step_resume_breakpoint =
5096 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5097 }
5098
5099 static void
5100 stop_stepping (struct execution_control_state *ecs)
5101 {
5102 if (debug_infrun)
5103 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5104
5105 /* Let callers know we don't want to wait for the inferior anymore. */
5106 ecs->wait_some_more = 0;
5107 }
5108
5109 /* This function handles various cases where we need to continue
5110 waiting for the inferior. */
5111 /* (Used to be the keep_going: label in the old wait_for_inferior) */
5112
5113 static void
5114 keep_going (struct execution_control_state *ecs)
5115 {
5116 /* Make sure normal_stop is called if we get a QUIT handled before
5117 reaching resume. */
5118 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5119
5120 /* Save the pc before execution, to compare with pc after stop. */
5121 ecs->event_thread->prev_pc
5122 = regcache_read_pc (get_thread_regcache (ecs->ptid));
5123
5124 /* If we did not do break;, it means we should keep running the
5125 inferior and not return to the debugger. */
5126
5127 if (ecs->event_thread->trap_expected
5128 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
5129 {
5130 /* We took a signal (which we are supposed to pass through to
5131 the inferior, else we'd not get here) and we haven't yet
5132 gotten our trap. Simply continue. */
5133
5134 discard_cleanups (old_cleanups);
5135 resume (currently_stepping (ecs->event_thread),
5136 ecs->event_thread->stop_signal);
5137 }
5138 else
5139 {
5140 /* Either the trap was not expected, but we are continuing
5141 anyway (the user asked that this signal be passed to the
5142 child)
5143 -- or --
5144 The signal was SIGTRAP, i.e. it was our signal, but we
5145 decided we should resume from it.
5146
5147 We're going to run this baby now!
5148
5149 Note that insert_breakpoints won't try to re-insert
5150 already inserted breakpoints. Therefore, we don't
5151 care if breakpoints were already inserted, or not. */
5152
5153 if (ecs->event_thread->stepping_over_breakpoint)
5154 {
5155 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5156
5157 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5158 /* Since we can't do a displaced step, we have to remove
5159 the breakpoint while we step it. To keep things
5160 simple, we remove them all. */
5161 remove_breakpoints ();
5162 }
5163 else
5164 {
5165 struct gdb_exception e;
5166
5167 /* Stop stepping when inserting breakpoints
5168 has failed. */
5169 TRY_CATCH (e, RETURN_MASK_ERROR)
5170 {
5171 insert_breakpoints ();
5172 }
5173 if (e.reason < 0)
5174 {
5175 exception_print (gdb_stderr, e);
5176 stop_stepping (ecs);
5177 return;
5178 }
5179 }
5180
5181 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
5182
5183 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5184 specifies that such a signal should be delivered to the
5185 target program).
5186
5187 Typically, this would occur when a user is debugging a
5188 target monitor on a simulator: the target monitor sets a
5189 breakpoint; the simulator encounters this break-point and
5190 halts the simulation, handing control to GDB; GDB, noting
5191 that the break-point isn't valid, returns control back to the
5192 simulator; the simulator then delivers the hardware
5193 equivalent of a SIGNAL_TRAP to the program being debugged. */
5194
5195 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
5196 && !signal_program[ecs->event_thread->stop_signal])
5197 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
5198
5199 discard_cleanups (old_cleanups);
5200 resume (currently_stepping (ecs->event_thread),
5201 ecs->event_thread->stop_signal);
5202 }
5203
5204 prepare_to_wait (ecs);
5205 }
5206
5207 /* This function normally comes after a resume, before
5208 handle_inferior_event exits. It takes care of any last bits of
5209 housekeeping, and sets the all-important wait_some_more flag. */
5210
5211 static void
5212 prepare_to_wait (struct execution_control_state *ecs)
5213 {
5214 if (debug_infrun)
5215 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5216
5217 /* This is the old end of the while loop. Let everybody know we
5218 want to wait for the inferior some more and get called again
5219 soon. */
5220 ecs->wait_some_more = 1;
5221 }
5222
5223 /* Several print_*_reason functions to print why the inferior has stopped.
5224 We always print something when the inferior exits, or receives a signal.
5225 The rest of the cases are dealt with later on in normal_stop and
5226 print_it_typical. Ideally there should be a call to one of these
5227 print_*_reason functions from handle_inferior_event each time
5228 stop_stepping is called. */
5229
5230 /* Print why the inferior has stopped.
5231 We are done with a step/next/si/ni command.  Nothing is printed for the
5232 CLI; for MI, emit the stop "reason" field, but only if we are not in
5233 the middle of doing a "step n" operation for n > 1. */
5234
5235 static void
5236 print_end_stepping_range_reason (void)
5237 {
5238 if ((!inferior_thread ()->step_multi || !inferior_thread ()->stop_step)
5239 && ui_out_is_mi_like_p (uiout))
5240 ui_out_field_string (uiout, "reason",
5241 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5242 }
5243
5244 /* The inferior was terminated by a signal, print why it stopped. */
5245
5246 static void
5247 print_signal_exited_reason (enum target_signal siggnal)
5248 {
5249 annotate_signalled ();
5250 if (ui_out_is_mi_like_p (uiout))
5251 ui_out_field_string
5252 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5253 ui_out_text (uiout, "\nProgram terminated with signal ");
5254 annotate_signal_name ();
5255 ui_out_field_string (uiout, "signal-name",
5256 target_signal_to_name (siggnal));
5257 annotate_signal_name_end ();
5258 ui_out_text (uiout, ", ");
5259 annotate_signal_string ();
5260 ui_out_field_string (uiout, "signal-meaning",
5261 target_signal_to_string (siggnal));
5262 annotate_signal_string_end ();
5263 ui_out_text (uiout, ".\n");
5264 ui_out_text (uiout, "The program no longer exists.\n");
5265 }
5266
5267 /* The inferior program is finished, print why it stopped. */
5268
5269 static void
5270 print_exited_reason (int exitstatus)
5271 {
5272 annotate_exited (exitstatus);
5273 if (exitstatus)
5274 {
5275 if (ui_out_is_mi_like_p (uiout))
5276 ui_out_field_string (uiout, "reason",
5277 async_reason_lookup (EXEC_ASYNC_EXITED));
5278 ui_out_text (uiout, "\nProgram exited with code ");
5279 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
5280 ui_out_text (uiout, ".\n");
5281 }
5282 else
5283 {
5284 if (ui_out_is_mi_like_p (uiout))
5285 ui_out_field_string
5286 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5287 ui_out_text (uiout, "\nProgram exited normally.\n");
5288 }
5289 /* Support the --return-child-result option. */
5290 return_child_result_value = exitstatus;
5291 }
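
/* For illustration, the CLI output produced by print_exited_reason looks
   like:

     Program exited with code 01.

   for a nonzero status (note the octal formatting of the exit code), and

     Program exited normally.

   otherwise.  */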
5292
5293 /* Signal received, print why the inferior has stopped. The signal table
5294 tells us to print about it. */
5295
5296 static void
5297 print_signal_received_reason (enum target_signal siggnal)
5298 {
5299 annotate_signal ();
5300
5301 if (siggnal == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5302 {
5303 struct thread_info *t = inferior_thread ();
5304
5305 ui_out_text (uiout, "\n[");
5306 ui_out_field_string (uiout, "thread-name",
5307 target_pid_to_str (t->ptid));
5308 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5309 ui_out_text (uiout, " stopped");
5310 }
5311 else
5312 {
5313 ui_out_text (uiout, "\nProgram received signal ");
5314 annotate_signal_name ();
5315 if (ui_out_is_mi_like_p (uiout))
5316 ui_out_field_string
5317 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5318 ui_out_field_string (uiout, "signal-name",
5319 target_signal_to_name (siggnal));
5320 annotate_signal_name_end ();
5321 ui_out_text (uiout, ", ");
5322 annotate_signal_string ();
5323 ui_out_field_string (uiout, "signal-meaning",
5324 target_signal_to_string (siggnal));
5325 annotate_signal_string_end ();
5326 }
5327 ui_out_text (uiout, ".\n");
5328 }
5329
5330 /* Reverse execution: target ran out of history info, print why the inferior
5331 has stopped. */
5332
5333 static void
5334 print_no_history_reason (void)
5335 {
5336 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5337 }
5338
5339 /* Here to return control to GDB when the inferior stops for real.
5340 Print appropriate messages, remove breakpoints, give terminal our modes.
5341
5342 The global STOP_PRINT_FRAME, when nonzero, means print the executing
5343 frame (pc, function, args, file, line number and line text). */
5346
5347 void
5348 normal_stop (void)
5349 {
5350 struct target_waitstatus last;
5351 ptid_t last_ptid;
5352 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5353
5354 get_last_target_status (&last_ptid, &last);
5355
5356 /* If an exception is thrown from this point on, make sure to
5357 propagate GDB's knowledge of the executing state to the
5358 frontend/user running state. A QUIT is an easy exception to see
5359 here, so do this before any filtered output. */
5360 if (!non_stop)
5361 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5362 else if (last.kind != TARGET_WAITKIND_SIGNALLED
5363 && last.kind != TARGET_WAITKIND_EXITED)
5364 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5365
5366 /* In non-stop mode, we don't want GDB to switch threads behind the
5367 user's back, to avoid races where the user is typing a command to
5368 apply to thread x, but GDB switches to thread y before the user
5369 finishes entering the command. */
5370
5371 /* As with the notification of thread events, we want to delay
5372 notifying the user that we've switched thread context until
5373 the inferior actually stops.
5374
5375 There's no point in saying anything if the inferior has exited.
5376 Note that SIGNALLED here means "exited with a signal", not
5377 "received a signal". */
5378 if (!non_stop
5379 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5380 && target_has_execution
5381 && last.kind != TARGET_WAITKIND_SIGNALLED
5382 && last.kind != TARGET_WAITKIND_EXITED)
5383 {
5384 target_terminal_ours_for_output ();
5385 printf_filtered (_("[Switching to %s]\n"),
5386 target_pid_to_str (inferior_ptid));
5387 annotate_thread_changed ();
5388 previous_inferior_ptid = inferior_ptid;
5389 }
5390
5391 if (!breakpoints_always_inserted_mode () && target_has_execution)
5392 {
5393 if (remove_breakpoints ())
5394 {
5395 target_terminal_ours_for_output ();
5396 printf_filtered (_("\
5397 Cannot remove breakpoints because program is no longer writable.\n\
5398 Further execution is probably impossible.\n"));
5399 }
5400 }
5401
5402 /* If an auto-display called a function and that got a signal,
5403 disable that auto-display to avoid an infinite recursion. */
5404
5405 if (stopped_by_random_signal)
5406 disable_current_display ();
5407
5408 /* Don't print a message if in the middle of doing a "step n"
5409 operation for n > 1 */
5410 if (target_has_execution
5411 && last.kind != TARGET_WAITKIND_SIGNALLED
5412 && last.kind != TARGET_WAITKIND_EXITED
5413 && inferior_thread ()->step_multi
5414 && inferior_thread ()->stop_step)
5415 goto done;
5416
5417 target_terminal_ours ();
5418
5419 /* Set the current source location. This will also happen if we
5420 display the frame below, but the current SAL will be incorrect
5421 during a user hook-stop function. */
5422 if (has_stack_frames () && !stop_stack_dummy)
5423 set_current_sal_from_frame (get_current_frame (), 1);
5424
5425 /* Let the user/frontend see the threads as stopped. */
5426 do_cleanups (old_chain);
5427
5428 /* Look up the hook_stop and run it (CLI internally handles problem
5429 of stop_command's pre-hook not existing). */
5430 if (stop_command)
5431 catch_errors (hook_stop_stub, stop_command,
5432 "Error while running hook_stop:\n", RETURN_MASK_ALL);
5433
5434 if (!has_stack_frames ())
5435 goto done;
5436
5437 if (last.kind == TARGET_WAITKIND_SIGNALLED
5438 || last.kind == TARGET_WAITKIND_EXITED)
5439 goto done;
5440
5441 /* Select innermost stack frame - i.e., current frame is frame 0,
5442 and current location is based on that.
5443 Don't do this on return from a stack dummy routine,
5444 or if the program has exited. */
5445
5446 if (!stop_stack_dummy)
5447 {
5448 select_frame (get_current_frame ());
5449
5450 /* Print current location without a level number, if
5451 we have changed functions or hit a breakpoint.
5452 Print source line if we have one.
5453 bpstat_print() contains the logic deciding in detail
5454 what to print, based on the event(s) that just occurred. */
5455
5456 /* If --batch-silent is enabled then there's no need to print the current
5457 source location, and trying to do so risks causing an error message about
5458 missing source files. */
5459 if (stop_print_frame && !batch_silent)
5460 {
5461 int bpstat_ret;
5462 int source_flag;
5463 int do_frame_printing = 1;
5464 struct thread_info *tp = inferior_thread ();
5465
5466 bpstat_ret = bpstat_print (tp->stop_bpstat);
5467 switch (bpstat_ret)
5468 {
5469 case PRINT_UNKNOWN:
5470 /* If we had hit a shared library event breakpoint,
5471 bpstat_print would print out this message. If we hit
5472 an OS-level shared library event, do the same
5473 thing. */
5474 if (last.kind == TARGET_WAITKIND_LOADED)
5475 {
5476 printf_filtered (_("Stopped due to shared library event\n"));
5477 source_flag = SRC_LINE; /* something bogus */
5478 do_frame_printing = 0;
5479 break;
5480 }
5481
5482 /* FIXME: cagney/2002-12-01: Given that a frame ID does
5483 (or should) carry around the function and does (or
5484 should) use that when doing a frame comparison. */
5485 if (tp->stop_step
5486 && frame_id_eq (tp->step_frame_id,
5487 get_frame_id (get_current_frame ()))
5488 && step_start_function == find_pc_function (stop_pc))
5489 source_flag = SRC_LINE; /* finished step, just print source line */
5490 else
5491 source_flag = SRC_AND_LOC; /* print location and source line */
5492 break;
5493 case PRINT_SRC_AND_LOC:
5494 source_flag = SRC_AND_LOC; /* print location and source line */
5495 break;
5496 case PRINT_SRC_ONLY:
5497 source_flag = SRC_LINE;
5498 break;
5499 case PRINT_NOTHING:
5500 source_flag = SRC_LINE; /* something bogus */
5501 do_frame_printing = 0;
5502 break;
5503 default:
5504 internal_error (__FILE__, __LINE__, _("Unknown value."));
5505 }
5506
5507 /* The behavior of this routine with respect to the source
5508 flag is:
5509 SRC_LINE: Print only source line
5510 LOCATION: Print only location
5511 SRC_AND_LOC: Print location and source line */
5512 if (do_frame_printing)
5513 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5514
5515 /* Display the auto-display expressions. */
5516 do_displays ();
5517 }
5518 }
5519
5520 /* Save the function value return registers, if we care.
5521 We might be about to restore their previous contents. */
5522 if (inferior_thread ()->proceed_to_finish)
5523 {
5524 /* This should not be necessary. */
5525 if (stop_registers)
5526 regcache_xfree (stop_registers);
5527
5528 /* NB: The copy goes through to the target picking up the value of
5529 all the registers. */
5530 stop_registers = regcache_dup (get_current_regcache ());
5531 }
5532
5533 if (stop_stack_dummy == STOP_STACK_DUMMY)
5534 {
5535 /* Pop the empty frame that contains the stack dummy.
5536 This also restores inferior state prior to the call
5537 (struct inferior_thread_state). */
5538 struct frame_info *frame = get_current_frame ();
5539
5540 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5541 frame_pop (frame);
5542 /* frame_pop() calls reinit_frame_cache as the last thing it does
5543 which means there's currently no selected frame. We don't need
5544 to re-establish a selected frame if the dummy call returns normally,
5545 that will be done by restore_inferior_status. However, we do have
5546 to handle the case where the dummy call is returning after being
5547 stopped (e.g. the dummy call previously hit a breakpoint). We
5548 can't know which case we have so just always re-establish a
5549 selected frame here. */
5550 select_frame (get_current_frame ());
5551 }
5552
5553 done:
5554 annotate_stopped ();
5555
5556 /* Suppress the stop observer if we're in the middle of:
5557
5558 - a step n (n > 1), as there are still more steps to be done.
5559
5560 - a "finish" command, as the observer will be called in
5561 finish_command_continuation, so it can include the inferior
5562 function's return value.
5563
5564 - calling an inferior function, as we pretend the inferior didn't
5565 run at all. The return value of the call is handled by the
5566 expression evaluator, through call_function_by_hand. */
5567
5568 if (!target_has_execution
5569 || last.kind == TARGET_WAITKIND_SIGNALLED
5570 || last.kind == TARGET_WAITKIND_EXITED
5571 || (!inferior_thread ()->step_multi
5572 && !(inferior_thread ()->stop_bpstat
5573 && inferior_thread ()->proceed_to_finish)
5574 && !inferior_thread ()->in_infcall))
5575 {
5576 if (!ptid_equal (inferior_ptid, null_ptid))
5577 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5578 stop_print_frame);
5579 else
5580 observer_notify_normal_stop (NULL, stop_print_frame);
5581 }
5582
5583 if (target_has_execution)
5584 {
5585 if (last.kind != TARGET_WAITKIND_SIGNALLED
5586 && last.kind != TARGET_WAITKIND_EXITED)
5587 /* Delete the breakpoint we stopped at, if it wants to be deleted.
5588 Delete any breakpoint that is to be deleted at the next stop. */
5589 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5590 }
5591
5592 /* Try to get rid of automatically added inferiors that are no
5593 longer needed. Keeping those around slows down things linearly.
5594 Note that this never removes the current inferior. */
5595 prune_inferiors ();
5596 }
5597
5598 static int
5599 hook_stop_stub (void *cmd)
5600 {
5601 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5602 return (0);
5603 }
5604 \f
5605 int
5606 signal_stop_state (int signo)
5607 {
5608 return signal_stop[signo];
5609 }
5610
5611 int
5612 signal_print_state (int signo)
5613 {
5614 return signal_print[signo];
5615 }
5616
5617 int
5618 signal_pass_state (int signo)
5619 {
5620 return signal_program[signo];
5621 }
5622
5623 int
5624 signal_stop_update (int signo, int state)
5625 {
5626 int ret = signal_stop[signo];
5627
5628 signal_stop[signo] = state;
5629 return ret;
5630 }
5631
5632 int
5633 signal_print_update (int signo, int state)
5634 {
5635 int ret = signal_print[signo];
5636
5637 signal_print[signo] = state;
5638 return ret;
5639 }
5640
5641 int
5642 signal_pass_update (int signo, int state)
5643 {
5644 int ret = signal_program[signo];
5645
5646 signal_program[signo] = state;
5647 return ret;
5648 }
5649
5650 static void
5651 sig_print_header (void)
5652 {
5653 printf_filtered (_("\
5654 Signal Stop\tPrint\tPass to program\tDescription\n"));
5655 }
5656
5657 static void
5658 sig_print_info (enum target_signal oursig)
5659 {
5660 const char *name = target_signal_to_name (oursig);
5661 int name_padding = 13 - strlen (name);
5662
5663 if (name_padding <= 0)
5664 name_padding = 0;
5665
5666 printf_filtered ("%s", name);
5667 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
5668 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5669 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5670 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5671 printf_filtered ("%s\n", target_signal_to_string (oursig));
5672 }
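
/* For illustration (spacing approximate), "info signals SIGINT" prints a
   header plus one row from the two functions above, roughly:

     Signal        Stop      Print   Pass to program   Description
     SIGINT        Yes       Yes     No                Interrupt

   "Pass to program" is "No" for SIGINT because _initialize_infrun clears
   signal_program[TARGET_SIGNAL_INT] below.  */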
5673
5674 /* Specify how various signals in the inferior should be handled. */
5675
5676 static void
5677 handle_command (char *args, int from_tty)
5678 {
5679 char **argv;
5680 int digits, wordlen;
5681 int sigfirst, signum, siglast;
5682 enum target_signal oursig;
5683 int allsigs;
5684 int nsigs;
5685 unsigned char *sigs;
5686 struct cleanup *old_chain;
5687
5688 if (args == NULL)
5689 {
5690 error_no_arg (_("signal to handle"));
5691 }
5692
5693 /* Allocate and zero an array of flags for which signals to handle. */
5694
5695 nsigs = (int) TARGET_SIGNAL_LAST;
5696 sigs = (unsigned char *) alloca (nsigs);
5697 memset (sigs, 0, nsigs);
5698
5699 /* Break the command line up into args. */
5700
5701 argv = gdb_buildargv (args);
5702 old_chain = make_cleanup_freeargv (argv);
5703
5704 /* Walk through the args, looking for signal numbers, signal names, and
5705 actions. Signal numbers and signal names may be interspersed with
5706 actions, with the actions being performed for all signals cumulatively
5707 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
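
/* For example (illustrative inputs only), all of the following are
   accepted here:

       handle SIGUSR1 nostop noprint pass
       handle 5-10 stop print
       handle all ignore

   Action words take effect as they are seen and are applied to every
   signal named so far on the line.  */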
5708
5709 while (*argv != NULL)
5710 {
5711 wordlen = strlen (*argv);
5712 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5713 {;
5714 }
5715 allsigs = 0;
5716 sigfirst = siglast = -1;
5717
5718 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5719 {
5720 /* Apply action to all signals except those used by the
5721 debugger. Silently skip those. */
5722 allsigs = 1;
5723 sigfirst = 0;
5724 siglast = nsigs - 1;
5725 }
5726 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5727 {
5728 SET_SIGS (nsigs, sigs, signal_stop);
5729 SET_SIGS (nsigs, sigs, signal_print);
5730 }
5731 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5732 {
5733 UNSET_SIGS (nsigs, sigs, signal_program);
5734 }
5735 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5736 {
5737 SET_SIGS (nsigs, sigs, signal_print);
5738 }
5739 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5740 {
5741 SET_SIGS (nsigs, sigs, signal_program);
5742 }
5743 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5744 {
5745 UNSET_SIGS (nsigs, sigs, signal_stop);
5746 }
5747 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5748 {
5749 SET_SIGS (nsigs, sigs, signal_program);
5750 }
5751 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5752 {
5753 UNSET_SIGS (nsigs, sigs, signal_print);
5754 UNSET_SIGS (nsigs, sigs, signal_stop);
5755 }
5756 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5757 {
5758 UNSET_SIGS (nsigs, sigs, signal_program);
5759 }
5760 else if (digits > 0)
5761 {
5762 /* It is numeric. The numeric signal refers to our own
5763 internal signal numbering from target.h, not to host/target
5764 signal number. This is a feature; users really should be
5765 using symbolic names anyway, and the common ones like
5766 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5767
5768 sigfirst = siglast = (int)
5769 target_signal_from_command (atoi (*argv));
5770 if ((*argv)[digits] == '-')
5771 {
5772 siglast = (int)
5773 target_signal_from_command (atoi ((*argv) + digits + 1));
5774 }
5775 if (sigfirst > siglast)
5776 {
5777 /* Bet he didn't figure we'd think of this case... */
5778 signum = sigfirst;
5779 sigfirst = siglast;
5780 siglast = signum;
5781 }
5782 }
5783 else
5784 {
5785 oursig = target_signal_from_name (*argv);
5786 if (oursig != TARGET_SIGNAL_UNKNOWN)
5787 {
5788 sigfirst = siglast = (int) oursig;
5789 }
5790 else
5791 {
5792 /* Not a number and not a recognized flag word => complain. */
5793 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5794 }
5795 }
5796
5797 /* If any signal numbers or symbol names were found, set flags for
5798 which signals to apply actions to. */
5799
5800 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5801 {
5802 switch ((enum target_signal) signum)
5803 {
5804 case TARGET_SIGNAL_TRAP:
5805 case TARGET_SIGNAL_INT:
5806 if (!allsigs && !sigs[signum])
5807 {
5808 if (query (_("%s is used by the debugger.\n\
5809 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5810 {
5811 sigs[signum] = 1;
5812 }
5813 else
5814 {
5815 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5816 gdb_flush (gdb_stdout);
5817 }
5818 }
5819 break;
5820 case TARGET_SIGNAL_0:
5821 case TARGET_SIGNAL_DEFAULT:
5822 case TARGET_SIGNAL_UNKNOWN:
5823 /* Make sure that "all" doesn't print these. */
5824 break;
5825 default:
5826 sigs[signum] = 1;
5827 break;
5828 }
5829 }
5830
5831 argv++;
5832 }
5833
5834 for (signum = 0; signum < nsigs; signum++)
5835 if (sigs[signum])
5836 {
5837 target_notice_signals (inferior_ptid);
5838
5839 if (from_tty)
5840 {
5841 /* Show the results. */
5842 sig_print_header ();
5843 for (; signum < nsigs; signum++)
5844 if (sigs[signum])
5845 sig_print_info (signum);
5846 }
5847
5848 break;
5849 }
5850
5851 do_cleanups (old_chain);
5852 }
5853
5854 static void
5855 xdb_handle_command (char *args, int from_tty)
5856 {
5857 char **argv;
5858 struct cleanup *old_chain;
5859
5860 if (args == NULL)
5861 error_no_arg (_("xdb command"));
5862
5863 /* Break the command line up into args. */
5864
5865 argv = gdb_buildargv (args);
5866 old_chain = make_cleanup_freeargv (argv);
5867 if (argv[1] != (char *) NULL)
5868 {
5869 char *argBuf;
5870 int bufLen;
5871
5872 bufLen = strlen (argv[0]) + 20;
5873 argBuf = (char *) xmalloc (bufLen);
5874 if (argBuf)
5875 {
5876 int validFlag = 1;
5877 enum target_signal oursig;
5878
5879 oursig = target_signal_from_name (argv[0]);
5880 memset (argBuf, 0, bufLen);
5881 if (strcmp (argv[1], "Q") == 0)
5882 sprintf (argBuf, "%s %s", argv[0], "noprint");
5883 else
5884 {
5885 if (strcmp (argv[1], "s") == 0)
5886 {
5887 if (!signal_stop[oursig])
5888 sprintf (argBuf, "%s %s", argv[0], "stop");
5889 else
5890 sprintf (argBuf, "%s %s", argv[0], "nostop");
5891 }
5892 else if (strcmp (argv[1], "i") == 0)
5893 {
5894 if (!signal_program[oursig])
5895 sprintf (argBuf, "%s %s", argv[0], "pass");
5896 else
5897 sprintf (argBuf, "%s %s", argv[0], "nopass");
5898 }
5899 else if (strcmp (argv[1], "r") == 0)
5900 {
5901 if (!signal_print[oursig])
5902 sprintf (argBuf, "%s %s", argv[0], "print");
5903 else
5904 sprintf (argBuf, "%s %s", argv[0], "noprint");
5905 }
5906 else
5907 validFlag = 0;
5908 }
5909 if (validFlag)
5910 handle_command (argBuf, from_tty);
5911 else
5912 printf_filtered (_("Invalid signal handling flag.\n"));
5913 if (argBuf)
5914 xfree (argBuf);
5915 }
5916 }
5917 do_cleanups (old_chain);
5918 }
5919
5920 /* Print current contents of the tables set by the handle command.
5921 It is possible we should just be printing signals actually used
5922 by the current target (but for things to work right when switching
5923 targets, all signals should be in the signal tables). */
5924
5925 static void
5926 signals_info (char *signum_exp, int from_tty)
5927 {
5928 enum target_signal oursig;
5929
5930 sig_print_header ();
5931
5932 if (signum_exp)
5933 {
5934 /* First see if this is a symbol name. */
5935 oursig = target_signal_from_name (signum_exp);
5936 if (oursig == TARGET_SIGNAL_UNKNOWN)
5937 {
5938 /* No, try numeric. */
5939 oursig =
5940 target_signal_from_command (parse_and_eval_long (signum_exp));
5941 }
5942 sig_print_info (oursig);
5943 return;
5944 }
5945
5946 printf_filtered ("\n");
5947 /* These ugly casts brought to you by the native VAX compiler. */
5948 for (oursig = TARGET_SIGNAL_FIRST;
5949 (int) oursig < (int) TARGET_SIGNAL_LAST;
5950 oursig = (enum target_signal) ((int) oursig + 1))
5951 {
5952 QUIT;
5953
5954 if (oursig != TARGET_SIGNAL_UNKNOWN
5955 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5956 sig_print_info (oursig);
5957 }
5958
5959 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5960 }
5961
5962 /* The $_siginfo convenience variable is a bit special. We don't know
5963 for sure the type of the value until we actually have a chance to
5964 fetch the data. The type can change depending on gdbarch, so it is
5965 also dependent on which thread you have selected.  This is handled by:
5966
5967 1. making $_siginfo be an internalvar that creates a new value on
5968 access.
5969
5970 2. making the value of $_siginfo be an lval_computed value. */
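
/* For example (illustrative, and assuming a target such as GNU/Linux whose
   gdbarch provides a siginfo type), the user can inspect details of the
   last signal with:

       (gdb) print $_siginfo.si_signo
       (gdb) print $_siginfo.si_code

   Evaluating such an expression ends up in siginfo_value_read below.  */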
5971
5972 /* This function implements the lval_computed support for reading a
5973 $_siginfo value. */
5974
5975 static void
5976 siginfo_value_read (struct value *v)
5977 {
5978 LONGEST transferred;
5979
5980 transferred =
5981 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5982 NULL,
5983 value_contents_all_raw (v),
5984 value_offset (v),
5985 TYPE_LENGTH (value_type (v)));
5986
5987 if (transferred != TYPE_LENGTH (value_type (v)))
5988 error (_("Unable to read siginfo"));
5989 }
5990
5991 /* This function implements the lval_computed support for writing a
5992 $_siginfo value. */
5993
5994 static void
5995 siginfo_value_write (struct value *v, struct value *fromval)
5996 {
5997 LONGEST transferred;
5998
5999 transferred = target_write (&current_target,
6000 TARGET_OBJECT_SIGNAL_INFO,
6001 NULL,
6002 value_contents_all_raw (fromval),
6003 value_offset (v),
6004 TYPE_LENGTH (value_type (fromval)));
6005
6006 if (transferred != TYPE_LENGTH (value_type (fromval)))
6007 error (_("Unable to write siginfo"));
6008 }
6009
6010 static struct lval_funcs siginfo_value_funcs =
6011 {
6012 siginfo_value_read,
6013 siginfo_value_write
6014 };
6015
6016 /* Return a new value with the correct type for the siginfo object of
6017 the current thread using architecture GDBARCH. Return a void value
6018 if there's no object available. */
6019
6020 static struct value *
6021 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
6022 {
6023 if (target_has_stack
6024 && !ptid_equal (inferior_ptid, null_ptid)
6025 && gdbarch_get_siginfo_type_p (gdbarch))
6026 {
6027 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6028
6029 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
6030 }
6031
6032 return allocate_value (builtin_type (gdbarch)->builtin_void);
6033 }
6034
6035 \f
6036 /* Inferior thread state.
6037 These are details related to the inferior itself, and don't include
6038 things like what frame the user had selected or what gdb was doing
6039 with the target at the time.
6040 For inferior function calls these are things we want to restore
6041 regardless of whether the function call successfully completes
6042 or the dummy frame has to be manually popped. */
6043
6044 struct inferior_thread_state
6045 {
6046 enum target_signal stop_signal;
6047 CORE_ADDR stop_pc;
6048 struct regcache *registers;
6049
6050 /* Format of SIGINFO or NULL if it is not present. */
6051 struct gdbarch *siginfo_gdbarch;
6052
6053 /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
6054 TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For a different gdbarch the
6055 content would be invalid. */
6056 gdb_byte *siginfo_data;
6057 };
6058
6059 struct inferior_thread_state *
6060 save_inferior_thread_state (void)
6061 {
6062 struct inferior_thread_state *inf_state;
6063 struct thread_info *tp = inferior_thread ();
6064 struct regcache *regcache = get_current_regcache ();
6065 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6066 gdb_byte *siginfo_data = NULL;
6067
6068 if (gdbarch_get_siginfo_type_p (gdbarch))
6069 {
6070 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6071 size_t len = TYPE_LENGTH (type);
6072 struct cleanup *back_to;
6073
6074 siginfo_data = xmalloc (len);
6075 back_to = make_cleanup (xfree, siginfo_data);
6076
6077 if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6078 siginfo_data, 0, len) == len)
6079 discard_cleanups (back_to);
6080 else
6081 {
6082 /* Errors ignored. */
6083 do_cleanups (back_to);
6084 siginfo_data = NULL;
6085 }
6086 }
6087
6088 inf_state = XZALLOC (struct inferior_thread_state);
6089
6090 if (siginfo_data)
6091 {
6092 inf_state->siginfo_gdbarch = gdbarch;
6093 inf_state->siginfo_data = siginfo_data;
6094 }
6095
6096 inf_state->stop_signal = tp->stop_signal;
6097 inf_state->stop_pc = stop_pc;
6098
6099 inf_state->registers = regcache_dup (regcache);
6100
6101 return inf_state;
6102 }
6103
6104 /* Restore inferior session state to INF_STATE. */
6105
6106 void
6107 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6108 {
6109 struct thread_info *tp = inferior_thread ();
6110 struct regcache *regcache = get_current_regcache ();
6111 struct gdbarch *gdbarch = get_regcache_arch (regcache);
6112
6113 tp->stop_signal = inf_state->stop_signal;
6114 stop_pc = inf_state->stop_pc;
6115
6116 if (inf_state->siginfo_gdbarch == gdbarch)
6117 {
6118 struct type *type = gdbarch_get_siginfo_type (gdbarch);
6119 size_t len = TYPE_LENGTH (type);
6120
6121 /* Errors ignored. */
6122 target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
6123 inf_state->siginfo_data, 0, len);
6124 }
6125
6126 /* The inferior can be gone if the user types "print exit(0)"
6127 (and perhaps other times). */
6128 if (target_has_execution)
6129 /* NB: The register write goes through to the target. */
6130 regcache_cpy (regcache, inf_state->registers);
6131
6132 discard_inferior_thread_state (inf_state);
6133 }
6134
6135 static void
6136 do_restore_inferior_thread_state_cleanup (void *state)
6137 {
6138 restore_inferior_thread_state (state);
6139 }
6140
6141 struct cleanup *
6142 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6143 {
6144 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
6145 }
6146
6147 void
6148 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
6149 {
6150 regcache_xfree (inf_state->registers);
6151 xfree (inf_state->siginfo_data);
6152 xfree (inf_state);
6153 }
6154
6155 struct regcache *
6156 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
6157 {
6158 return inf_state->registers;
6159 }
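
#if 0
/* Illustrative sketch only (not compiled, not part of infrun.c): the
   typical save/restore pattern an inferior-function-call implementation
   might follow.  `run_something_in_the_inferior' is a hypothetical
   placeholder for the work done while the state is saved.  */
static void
example_save_and_restore_thread_state (void)
{
  struct inferior_thread_state *saved = save_inferior_thread_state ();
  struct cleanup *chain = make_cleanup_restore_inferior_thread_state (saved);

  run_something_in_the_inferior ();

  /* Running the cleanup restores the registers, stop_pc, stop_signal and
     (when the gdbarch still matches) the saved siginfo, then frees SAVED.  */
  do_cleanups (chain);
}
#endif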
6160
6161 /* Session related state for inferior function calls.
6162 These are the additional bits of state that need to be restored
6163 when an inferior function call successfully completes.
6164
6165 Keep the fields in order as present in their original structures. */
6166
6167 struct inferior_status
6168 {
6169 /* Direct copies of the struct thread_info fields: */
6170 CORE_ADDR step_range_start;
6171 CORE_ADDR step_range_end;
6172 struct frame_id step_frame_id;
6173 struct frame_id step_stack_frame_id;
6174 int stepping_over_breakpoint;
6175 int proceed_to_finish;
6176 int in_infcall;
6177 enum step_over_calls_kind step_over_calls;
6178 int stop_step;
6179 bpstat stop_bpstat;
6180
6181 /* Direct copies of the struct inferior fields: */
6182 int stop_soon;
6183
6184 /* Other fields: */
6185 enum stop_stack_kind stop_stack_dummy;
6186 int stopped_by_random_signal;
6187 CORE_ADDR step_resume_break_address;
6188 int stop_after_trap;
6189
6190 /* ID of the selected frame when the inferior function call was made. */
6191 struct frame_id selected_frame_id;
6192 };
6193
6194 /* Save all of the information associated with the inferior<==>gdb
6195 connection. */
6196
6197 struct inferior_status *
6198 save_inferior_status (void)
6199 {
6200 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
6201 struct thread_info *tp = inferior_thread ();
6202 struct inferior *inf = current_inferior ();
6203
6204 /* Direct copies of the struct thread_info fields: */
6205 inf_status->step_range_start = tp->step_range_start;
6206 inf_status->step_range_end = tp->step_range_end;
6207 inf_status->step_frame_id = tp->step_frame_id;
6208 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
6209 inf_status->stepping_over_breakpoint = tp->trap_expected;
6210 inf_status->proceed_to_finish = tp->proceed_to_finish;
6211 inf_status->in_infcall = tp->in_infcall;
6212 inf_status->step_over_calls = tp->step_over_calls;
6213 inf_status->stop_step = tp->stop_step;
6214
6215 /* Save original bpstat chain here; replace it with copy of chain.
6216 If caller's caller is walking the chain, they'll be happier if we
6217 hand them back the original chain when restore_inferior_status is
6218 called. */
6219 inf_status->stop_bpstat = tp->stop_bpstat;
6220 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
6221
6222 /* Direct copies of the struct inferior fields: */
6223 inf_status->stop_soon = inf->stop_soon;
6224
6225 /* Other fields: */
6226 inf_status->stop_stack_dummy = stop_stack_dummy;
6227 inf_status->stopped_by_random_signal = stopped_by_random_signal;
6228 inf_status->stop_after_trap = stop_after_trap;
6229
6230 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6231
6232 return inf_status;
6233 }
6234
6235 static int
6236 restore_selected_frame (void *args)
6237 {
6238 struct frame_id *fid = (struct frame_id *) args;
6239 struct frame_info *frame;
6240
6241 frame = frame_find_by_id (*fid);
6242
6243 /* frame_find_by_id returns NULL if the previously selected frame
6244 can no longer be found. */
6245 if (frame == NULL)
6246 {
6247 warning (_("Unable to restore previously selected frame."));
6248 return 0;
6249 }
6250
6251 select_frame (frame);
6252
6253 return (1);
6254 }
6255
6256 /* Restore inferior session state to INF_STATUS. */
6257
6258 void
6259 restore_inferior_status (struct inferior_status *inf_status)
6260 {
6261 struct thread_info *tp = inferior_thread ();
6262 struct inferior *inf = current_inferior ();
6263
6264 /* Direct copies of the struct thread_info fields: */
6265 tp->step_range_start = inf_status->step_range_start;
6266 tp->step_range_end = inf_status->step_range_end;
6267 tp->step_frame_id = inf_status->step_frame_id;
6268 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
6269 tp->trap_expected = inf_status->stepping_over_breakpoint;
6270 tp->proceed_to_finish = inf_status->proceed_to_finish;
6271 tp->in_infcall = inf_status->in_infcall;
6272 tp->step_over_calls = inf_status->step_over_calls;
6273 tp->stop_step = inf_status->stop_step;
6274
6275 /* Handle the bpstat_copy of the chain. */
6276 bpstat_clear (&tp->stop_bpstat);
6277 tp->stop_bpstat = inf_status->stop_bpstat;
6278 inf_status->stop_bpstat = NULL;
6279
6280 /* Direct copies of the struct inferior fields: */
6281 inf->stop_soon = inf_status->stop_soon;
6282
6283 /* Other fields: */
6284 stop_stack_dummy = inf_status->stop_stack_dummy;
6285 stopped_by_random_signal = inf_status->stopped_by_random_signal;
6286 stop_after_trap = inf_status->stop_after_trap;
6287
6288 if (target_has_stack)
6289 {
6290 /* The point of catch_errors is that if the stack is clobbered,
6291 walking the stack might encounter a garbage pointer and
6292 error() trying to dereference it. */
6293 if (catch_errors
6294 (restore_selected_frame, &inf_status->selected_frame_id,
6295 "Unable to restore previously selected frame:\n",
6296 RETURN_MASK_ERROR) == 0)
6297 /* Error in restoring the selected frame. Select the innermost
6298 frame. */
6299 select_frame (get_current_frame ());
6300 }
6301
6302 xfree (inf_status);
6303 }
6304
6305 static void
6306 do_restore_inferior_status_cleanup (void *sts)
6307 {
6308 restore_inferior_status (sts);
6309 }
6310
6311 struct cleanup *
6312 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
6313 {
6314 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
6315 }
6316
6317 void
6318 discard_inferior_status (struct inferior_status *inf_status)
6319 {
6320 /* See save_inferior_status for info on stop_bpstat. */
6321 bpstat_clear (&inf_status->stop_bpstat);
6322 xfree (inf_status);
6323 }
6324 \f
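/* The predicates below report whether the last target event (as returned
   by get_last_target_status) was of a particular kind for PID, and if so
   return the associated detail (child ptid, exec'd pathname, or syscall
   number) through the out parameter.  */
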
6325 int
6326 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6327 {
6328 struct target_waitstatus last;
6329 ptid_t last_ptid;
6330
6331 get_last_target_status (&last_ptid, &last);
6332
6333 if (last.kind != TARGET_WAITKIND_FORKED)
6334 return 0;
6335
6336 if (!ptid_equal (last_ptid, pid))
6337 return 0;
6338
6339 *child_pid = last.value.related_pid;
6340 return 1;
6341 }
6342
6343 int
6344 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6345 {
6346 struct target_waitstatus last;
6347 ptid_t last_ptid;
6348
6349 get_last_target_status (&last_ptid, &last);
6350
6351 if (last.kind != TARGET_WAITKIND_VFORKED)
6352 return 0;
6353
6354 if (!ptid_equal (last_ptid, pid))
6355 return 0;
6356
6357 *child_pid = last.value.related_pid;
6358 return 1;
6359 }
6360
6361 int
6362 inferior_has_execd (ptid_t pid, char **execd_pathname)
6363 {
6364 struct target_waitstatus last;
6365 ptid_t last_ptid;
6366
6367 get_last_target_status (&last_ptid, &last);
6368
6369 if (last.kind != TARGET_WAITKIND_EXECD)
6370 return 0;
6371
6372 if (!ptid_equal (last_ptid, pid))
6373 return 0;
6374
6375 *execd_pathname = xstrdup (last.value.execd_pathname);
6376 return 1;
6377 }
6378
6379 int
6380 inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6381 {
6382 struct target_waitstatus last;
6383 ptid_t last_ptid;
6384
6385 get_last_target_status (&last_ptid, &last);
6386
6387 if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY &&
6388 last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6389 return 0;
6390
6391 if (!ptid_equal (last_ptid, pid))
6392 return 0;
6393
6394 *syscall_number = last.value.syscall_number;
6395 return 1;
6396 }
6397
6398 /* Oft used ptids */
6399 ptid_t null_ptid;
6400 ptid_t minus_one_ptid;
6401
6402 /* Create a ptid given the necessary PID, LWP, and TID components. */
6403
6404 ptid_t
6405 ptid_build (int pid, long lwp, long tid)
6406 {
6407 ptid_t ptid;
6408
6409 ptid.pid = pid;
6410 ptid.lwp = lwp;
6411 ptid.tid = tid;
6412 return ptid;
6413 }
6414
6415 /* Create a ptid from just a pid. */
6416
6417 ptid_t
6418 pid_to_ptid (int pid)
6419 {
6420 return ptid_build (pid, 0, 0);
6421 }
6422
6423 /* Fetch the pid (process id) component from a ptid. */
6424
6425 int
6426 ptid_get_pid (ptid_t ptid)
6427 {
6428 return ptid.pid;
6429 }
6430
6431 /* Fetch the lwp (lightweight process) component from a ptid. */
6432
6433 long
6434 ptid_get_lwp (ptid_t ptid)
6435 {
6436 return ptid.lwp;
6437 }
6438
6439 /* Fetch the tid (thread id) component from a ptid. */
6440
6441 long
6442 ptid_get_tid (ptid_t ptid)
6443 {
6444 return ptid.tid;
6445 }
6446
6447 /* ptid_equal() is used to test equality of two ptids. */
6448
6449 int
6450 ptid_equal (ptid_t ptid1, ptid_t ptid2)
6451 {
6452 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6453 && ptid1.tid == ptid2.tid);
6454 }
6455
6456 /* Returns true if PTID represents a process. */
6457
6458 int
6459 ptid_is_pid (ptid_t ptid)
6460 {
6461 if (ptid_equal (minus_one_ptid, ptid))
6462 return 0;
6463 if (ptid_equal (null_ptid, ptid))
6464 return 0;
6465
6466 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6467 }
6468
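/* Return true if PTID matches FILTER.  FILTER can be the wild card
   minus_one_ptid (all ptids match it), a ptid representing a whole
   process (all ptids of that process match), or an exact ptid.  */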
6469 int
6470 ptid_match (ptid_t ptid, ptid_t filter)
6471 {
6472 /* Since both parameters have the same type, prevent easy mistakes
6473 from happening. */
6474 gdb_assert (!ptid_equal (ptid, minus_one_ptid)
6475 && !ptid_equal (ptid, null_ptid));
6476
6477 if (ptid_equal (filter, minus_one_ptid))
6478 return 1;
6479 if (ptid_is_pid (filter)
6480 && ptid_get_pid (ptid) == ptid_get_pid (filter))
6481 return 1;
6482 else if (ptid_equal (ptid, filter))
6483 return 1;
6484
6485 return 0;
6486 }
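
#if 0
/* Illustrative sketch only (not compiled): how the ptid constructors,
   accessors and ptid_match compose.  The numeric values are arbitrary.  */
static void
example_ptid_usage (void)
{
  ptid_t thread = ptid_build (1234, 1235, 0);	/* LWP 1235 of process 1234.  */
  ptid_t process = pid_to_ptid (1234);		/* The whole process.  */

  gdb_assert (ptid_get_pid (thread) == 1234);
  gdb_assert (ptid_get_lwp (thread) == 1235);
  gdb_assert (ptid_is_pid (process));

  /* A process filter matches any of its threads; minus_one_ptid matches
     everything.  */
  gdb_assert (ptid_match (thread, process));
  gdb_assert (ptid_match (thread, minus_one_ptid));
}
#endif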
6487
6488 /* restore_inferior_ptid() will be used by the cleanup machinery
6489 to restore the inferior_ptid value saved in a call to
6490 save_inferior_ptid(). */
6491
6492 static void
6493 restore_inferior_ptid (void *arg)
6494 {
6495 ptid_t *saved_ptid_ptr = arg;
6496
6497 inferior_ptid = *saved_ptid_ptr;
6498 xfree (arg);
6499 }
6500
6501 /* Save the value of inferior_ptid so that it may be restored by a
6502 later call to do_cleanups(). Returns the struct cleanup pointer
6503 needed for later doing the cleanup. */
6504
6505 struct cleanup *
6506 save_inferior_ptid (void)
6507 {
6508 ptid_t *saved_ptid_ptr;
6509
6510 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6511 *saved_ptid_ptr = inferior_ptid;
6512 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6513 }
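
#if 0
/* Illustrative sketch only (not compiled): temporarily switch
   inferior_ptid and rely on the cleanup machinery to put it back, even if
   an error is thrown in between.  TEMP_PTID is a hypothetical value
   supplied by the caller.  */
static void
example_with_temporary_ptid (ptid_t temp_ptid)
{
  struct cleanup *old_chain = save_inferior_ptid ();

  inferior_ptid = temp_ptid;
  /* ... do per-thread work here ...  */

  do_cleanups (old_chain);	/* Restores the saved inferior_ptid.  */
}
#endif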
6514 \f
6515
6516 /* User interface for reverse debugging:
6517 Set exec-direction / show exec-direction commands
6518 (setting it is an error unless the target can execute in reverse). */
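
/* For example (illustrative session, assuming a target that supports
   reverse execution, such as the process record target):

       (gdb) set exec-direction reverse
       (gdb) show exec-direction
       Reverse.

   Stepping and continuing then move the inferior backward in time.  */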
6519
6520 enum exec_direction_kind execution_direction = EXEC_FORWARD;
6521 static const char exec_forward[] = "forward";
6522 static const char exec_reverse[] = "reverse";
6523 static const char *exec_direction = exec_forward;
6524 static const char *exec_direction_names[] = {
6525 exec_forward,
6526 exec_reverse,
6527 NULL
6528 };
6529
6530 static void
6531 set_exec_direction_func (char *args, int from_tty,
6532 struct cmd_list_element *cmd)
6533 {
6534 if (target_can_execute_reverse)
6535 {
6536 if (!strcmp (exec_direction, exec_forward))
6537 execution_direction = EXEC_FORWARD;
6538 else if (!strcmp (exec_direction, exec_reverse))
6539 execution_direction = EXEC_REVERSE;
6540 }
6541 else
6542 {
6543 exec_direction = exec_forward;
6544 error (_("Target does not support this operation."));
6545 }
6546 }
6547
6548 static void
6549 show_exec_direction_func (struct ui_file *out, int from_tty,
6550 struct cmd_list_element *cmd, const char *value)
6551 {
6552 switch (execution_direction) {
6553 case EXEC_FORWARD:
6554 fprintf_filtered (out, _("Forward.\n"));
6555 break;
6556 case EXEC_REVERSE:
6557 fprintf_filtered (out, _("Reverse.\n"));
6558 break;
6559 case EXEC_ERROR:
6560 default:
6561 fprintf_filtered (out,
6562 _("Forward (target `%s' does not support exec-direction).\n"),
6563 target_shortname);
6564 break;
6565 }
6566 }
6567
6568 /* User interface for non-stop mode. */
6569
6570 int non_stop = 0;
6571
6572 static void
6573 set_non_stop (char *args, int from_tty,
6574 struct cmd_list_element *c)
6575 {
6576 if (target_has_execution)
6577 {
6578 non_stop_1 = non_stop;
6579 error (_("Cannot change this setting while the inferior is running."));
6580 }
6581
6582 non_stop = non_stop_1;
6583 }
6584
6585 static void
6586 show_non_stop (struct ui_file *file, int from_tty,
6587 struct cmd_list_element *c, const char *value)
6588 {
6589 fprintf_filtered (file,
6590 _("Controlling the inferior in non-stop mode is %s.\n"),
6591 value);
6592 }
6593
6594 static void
6595 show_schedule_multiple (struct ui_file *file, int from_tty,
6596 struct cmd_list_element *c, const char *value)
6597 {
6598 fprintf_filtered (file, _("\
6599 Resuming the execution of threads of all processes is %s.\n"), value);
6600 }
6601
6602 void
6603 _initialize_infrun (void)
6604 {
6605 int i;
6606 int numsigs;
6607
6608 add_info ("signals", signals_info, _("\
6609 What debugger does when program gets various signals.\n\
6610 Specify a signal as argument to print info on that signal only."));
6611 add_info_alias ("handle", "signals", 0);
6612
6613 add_com ("handle", class_run, handle_command, _("\
6614 Specify how to handle a signal.\n\
6615 Args are signals and actions to apply to those signals.\n\
6616 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6617 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6618 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6619 The special arg \"all\" is recognized to mean all signals except those\n\
6620 used by the debugger, typically SIGTRAP and SIGINT.\n\
6621 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6622 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6623 Stop means reenter debugger if this signal happens (implies print).\n\
6624 Print means print a message if this signal happens.\n\
6625 Pass means let program see this signal; otherwise program doesn't know.\n\
6626 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6627 Pass and Stop may be combined."));
6628 if (xdb_commands)
6629 {
6630 add_com ("lz", class_info, signals_info, _("\
6631 What debugger does when program gets various signals.\n\
6632 Specify a signal as argument to print info on that signal only."));
6633 add_com ("z", class_run, xdb_handle_command, _("\
6634 Specify how to handle a signal.\n\
6635 Args are signals and actions to apply to those signals.\n\
6636 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6637 from 1-15 are allowed for compatibility with old versions of GDB.\n\
6638 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6639 The special arg \"all\" is recognized to mean all signals except those\n\
6640 used by the debugger, typically SIGTRAP and SIGINT.\n\
6641 Recognized actions include \"s\" (toggles between stop and nostop),\n\
6642 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6643 nopass), \"Q\" (noprint)\n\
6644 Stop means reenter debugger if this signal happens (implies print).\n\
6645 Print means print a message if this signal happens.\n\
6646 Pass means let program see this signal; otherwise program doesn't know.\n\
6647 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6648 Pass and Stop may be combined."));
6649 }

  if (!dbx_commands)
    stop_command = add_cmd ("stop", class_obscure,
                            not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
Set inferior debugging."), _("\
Show inferior debugging."), _("\
When non-zero, inferior specific debugging is enabled."),
                            NULL,
                            show_debug_infrun,
                            &setdebuglist, &showdebuglist);

  add_setshow_zinteger_cmd ("displaced", class_maintenance, &debug_displaced, _("\
Set displaced stepping debugging."), _("\
Show displaced stepping debugging."), _("\
When non-zero, displaced stepping specific debugging is enabled."),
                            NULL,
                            show_debug_displaced,
                            &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
                           &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest. When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely. You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
                           set_non_stop,
                           show_non_stop,
                           &setlist,
                           &showlist);
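  /* Because set_non_stop above refuses changes while the inferior is
     running, the mode is normally chosen before starting the program,
     e.g.:
       (gdb) set non-stop on
       (gdb) run  */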

  numsigs = (int) TARGET_SIGNAL_LAST;
  signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
  signal_print = (unsigned char *)
    xmalloc (sizeof (signal_print[0]) * numsigs);
  signal_program = (unsigned char *)
    xmalloc (sizeof (signal_program[0]) * numsigs);
  for (i = 0; i < numsigs; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
    }
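  /* The loop above makes every signal default to stopping the program,
     announcing the signal, and passing it on to the program; the
     assignments below list the exceptions. */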

  /* Signals caused by debugger's own actions
     should not be given to the program afterwards. */
  signal_program[TARGET_SIGNAL_TRAP] = 0;
  signal_program[TARGET_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger. */
  signal_stop[TARGET_SIGNAL_ALRM] = 0;
  signal_print[TARGET_SIGNAL_ALRM] = 0;
  signal_stop[TARGET_SIGNAL_VTALRM] = 0;
  signal_print[TARGET_SIGNAL_VTALRM] = 0;
  signal_stop[TARGET_SIGNAL_PROF] = 0;
  signal_print[TARGET_SIGNAL_PROF] = 0;
  signal_stop[TARGET_SIGNAL_CHLD] = 0;
  signal_print[TARGET_SIGNAL_CHLD] = 0;
  signal_stop[TARGET_SIGNAL_IO] = 0;
  signal_print[TARGET_SIGNAL_IO] = 0;
  signal_stop[TARGET_SIGNAL_POLL] = 0;
  signal_print[TARGET_SIGNAL_POLL] = 0;
  signal_stop[TARGET_SIGNAL_URG] = 0;
  signal_print[TARGET_SIGNAL_URG] = 0;
  signal_stop[TARGET_SIGNAL_WINCH] = 0;
  signal_print[TARGET_SIGNAL_WINCH] = 0;

  /* These signals are used internally by user-level thread
     implementations. (See signal(5) on Solaris.) Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation. */
  signal_stop[TARGET_SIGNAL_LWP] = 0;
  signal_print[TARGET_SIGNAL_LWP] = 0;
  signal_stop[TARGET_SIGNAL_WAITING] = 0;
  signal_print[TARGET_SIGNAL_WAITING] = 0;
  signal_stop[TARGET_SIGNAL_CANCEL] = 0;
  signal_print[TARGET_SIGNAL_CANCEL] = 0;

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
                            &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events. The most common event of interest\n\
to the user would be loading/unloading of a new library."),
                            NULL,
                            show_stop_on_solib_events,
                            &setlist, &showlist);
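  /* Example use of the setting registered above:
       (gdb) set stop-on-solib-events 1
     makes GDB return control to the user whenever the dynamic linker
     reports that shared libraries were loaded or unloaded. */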

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
                        follow_fork_mode_kind_names,
                        &follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process. follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
                        NULL,
                        show_follow_fork_mode_string,
                        &setlist, &showlist);
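  /* Example use of the setting registered above:
       (gdb) set follow-fork-mode child
     makes GDB follow the child at the next fork, leaving the parent
     to run unfollowed (see also detach-on-fork further below). */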

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
                        follow_exec_mode_names,
                        &follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior. The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior. Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
                        NULL,
                        show_follow_exec_mode_string,
                        &setlist, &showlist);
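  /* Example use of the setting registered above:
       (gdb) set follow-exec-mode new
     makes GDB bind the process to a fresh inferior when it execs,
     instead of reusing the current one. */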

  add_setshow_enum_cmd ("scheduler-locking", class_run,
                        scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off == no locking (threads may preempt at any time)\n\
on == full locking (no thread except the current thread may run)\n\
step == scheduler locked during every single-step operation.\n\
In this mode, no other thread may run during a step command.\n\
Other threads may run while stepping over a function call ('next')."),
                        set_schedlock_func,  /* traps on target vector */
                        show_scheduler_mode,
                        &setlist, &showlist);
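  /* Example use of the setting registered above:
       (gdb) set scheduler-locking step
     keeps other threads from running while the current thread is
     being single-stepped. */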

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes. When off (which is the default), execution\n\
commands only resume the threads of the current process. The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
                           NULL,
                           show_schedule_multiple,
                           &setlist, &showlist);
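  /* Example use of the setting registered above:
       (gdb) set schedule-multiple on
     lets execution commands resume the threads of every process under
     debug rather than only those of the current process. */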

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
                           NULL,
                           show_step_stop_if_no_debug,
                           &setlist, &showlist);
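  /* Example use of the setting registered above:
       (gdb) set step-mode on
     makes "step" stop at the first instruction of a function that has
     no line number information instead of skipping over it. */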

  add_setshow_enum_cmd ("displaced-stepping", class_run,
                        can_use_displaced_stepping_enum,
                        &can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture. If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture. If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
                        NULL,
                        show_can_use_displaced_stepping,
                        &setlist, &showlist);
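  /* Example use of the setting registered above; the accepted values
     are "on", "off" and "auto", as described in the help text:
       (gdb) set displaced-stepping on  */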

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
                        &exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
                        _("Show direction of execution (forward/reverse)."),
                        _("Tells gdb whether to execute forward or backward."),
                        set_exec_direction_func, show_exec_direction_func,
                        &setlist, &showlist);
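  /* Example use of the setting registered above. Reverse execution
     needs a target that can run backwards, for instance after enabling
     process record with the "record" command:
       (gdb) record
       (gdb) set exec-direction reverse
       (gdb) step  */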

  /* Set/show detach-on-fork: user-settable mode. */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
                           NULL, NULL, &setlist, &showlist);
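  /* Example use of the setting registered above:
       (gdb) set detach-on-fork off
     keeps both parent and child under GDB's control after a fork
     instead of detaching the process that is not followed. */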

  /* ptid initializations */
  null_ptid = ptid_build (0, 0, 0);
  minus_one_ptid = ptid_build (-1, 0, 0);
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
  observer_attach_thread_stop_requested (infrun_thread_stop_requested);
  observer_attach_thread_exit (infrun_thread_thread_exit);
  observer_attach_inferior_exit (infrun_inferior_exit);

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet. At this point, we're quite sure there
     isn't another convenience variable of the same name. */
  create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
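  /* The convenience variable created above can be examined from the
     CLI on targets that supply extra signal information; its layout
     is target-dependent, e.g.:
       (gdb) print $_siginfo  */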

  add_setshow_boolean_cmd ("observer", no_class,
                           &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution. Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
                           set_observer_mode,
                           show_observer_mode,
                           &setlist,
                           &showlist);
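  /* Example use of the setting registered above:
       (gdb) set observer on
     puts GDB into the read-only observer mode described in the help
     text; "set observer off" restores normal control. */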
}