2009-08-10 Paul Pluzhnikov <ppluzhnikov@google.com>
[deliverable/binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
5 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
6 2008, 2009 Free Software Foundation, Inc.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "gdb_string.h"
25 #include <ctype.h>
26 #include "symtab.h"
27 #include "frame.h"
28 #include "inferior.h"
29 #include "exceptions.h"
30 #include "breakpoint.h"
31 #include "gdb_wait.h"
32 #include "gdbcore.h"
33 #include "gdbcmd.h"
34 #include "cli/cli-script.h"
35 #include "target.h"
36 #include "gdbthread.h"
37 #include "annotate.h"
38 #include "symfile.h"
39 #include "top.h"
40 #include <signal.h>
41 #include "inf-loop.h"
42 #include "regcache.h"
43 #include "value.h"
44 #include "observer.h"
45 #include "language.h"
46 #include "solib.h"
47 #include "main.h"
48 #include "gdb_assert.h"
49 #include "mi/mi-common.h"
50 #include "event-top.h"
51 #include "record.h"
52 #include "inline-frame.h"
53
54 /* Prototypes for local functions */
55
56 static void signals_info (char *, int);
57
58 static void handle_command (char *, int);
59
60 static void sig_print_info (enum target_signal);
61
62 static void sig_print_header (void);
63
64 static void resume_cleanups (void *);
65
66 static int hook_stop_stub (void *);
67
68 static int restore_selected_frame (void *);
69
70 static void build_infrun (void);
71
72 static int follow_fork (void);
73
74 static void set_schedlock_func (char *args, int from_tty,
75 struct cmd_list_element *c);
76
77 static int currently_stepping (struct thread_info *tp);
78
79 static int currently_stepping_or_nexting_callback (struct thread_info *tp,
80 void *data);
81
82 static void xdb_handle_command (char *args, int from_tty);
83
84 static int prepare_to_proceed (int);
85
86 void _initialize_infrun (void);
87
88 void nullify_last_target_wait_ptid (void);
89
90 /* When set, stop the 'step' command if we enter a function which has
91 no line number information. The normal behavior is that we step
92 over such function. */
93 int step_stop_if_no_debug = 0;
94 static void
95 show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
96 struct cmd_list_element *c, const char *value)
97 {
98 fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
99 }
100
101 /* In asynchronous mode, but simulating synchronous execution. */
102
103 int sync_execution = 0;
104
105 /* wait_for_inferior and normal_stop use this to notify the user
106 when the inferior stopped in a different thread than it had been
107 running in. */
108
109 static ptid_t previous_inferior_ptid;
110
111 int debug_displaced = 0;
112 static void
113 show_debug_displaced (struct ui_file *file, int from_tty,
114 struct cmd_list_element *c, const char *value)
115 {
116 fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
117 }
118
119 static int debug_infrun = 0;
120 static void
121 show_debug_infrun (struct ui_file *file, int from_tty,
122 struct cmd_list_element *c, const char *value)
123 {
124 fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
125 }
126
127 /* If the program uses ELF-style shared libraries, then calls to
128 functions in shared libraries go through stubs, which live in a
129 table called the PLT (Procedure Linkage Table). The first time the
130 function is called, the stub sends control to the dynamic linker,
131 which looks up the function's real address, patches the stub so
132 that future calls will go directly to the function, and then passes
133 control to the function.
134
135 If we are stepping at the source level, we don't want to see any of
136 this --- we just want to skip over the stub and the dynamic linker.
137 The simple approach is to single-step until control leaves the
138 dynamic linker.
139
140 However, on some systems (e.g., Red Hat's 5.2 distribution) the
141 dynamic linker calls functions in the shared C library, so you
142 can't tell from the PC alone whether the dynamic linker is still
143 running. In this case, we use a step-resume breakpoint to get us
144 past the dynamic linker, as if we were using "next" to step over a
145 function call.
146
147 in_solib_dynsym_resolve_code() says whether we're in the dynamic
148 linker code or not. Normally, this means we single-step. However,
149 if SKIP_SOLIB_RESOLVER then returns non-zero, then its value is an
150 address where we can place a step-resume breakpoint to get past the
151 linker's symbol resolution function.
152
153 in_solib_dynsym_resolve_code() can generally be implemented in a
154 pretty portable way, by comparing the PC against the address ranges
155 of the dynamic linker's sections.
156
157 SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
158 it depends on internal details of the dynamic linker. It's usually
159 not too hard to figure out where to put a breakpoint, but it
160 certainly isn't portable. SKIP_SOLIB_RESOLVER should do plenty of
161 sanity checking. If it can't figure things out, returning zero and
162 getting the (possibly confusing) stepping behavior is better than
163 signalling an error, which will obscure the change in the
164 inferior's state. */
165
166 /* This function returns TRUE if pc is the address of an instruction
167 that lies within the dynamic linker (such as the event hook, or the
168 dld itself).
169
170 This function must be used only when a dynamic linker event has
171 been caught, and the inferior is being stepped out of the hook, or
172 undefined results are guaranteed. */
173
174 #ifndef SOLIB_IN_DYNAMIC_LINKER
175 #define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
176 #endif
177
178
179 /* Convert the #defines into values. This is temporary until wfi control
180 flow is completely sorted out. */
181
182 #ifndef CANNOT_STEP_HW_WATCHPOINTS
183 #define CANNOT_STEP_HW_WATCHPOINTS 0
184 #else
185 #undef CANNOT_STEP_HW_WATCHPOINTS
186 #define CANNOT_STEP_HW_WATCHPOINTS 1
187 #endif
188
189 /* Tables of how to react to signals; the user sets them. */
190
191 static unsigned char *signal_stop;
192 static unsigned char *signal_print;
193 static unsigned char *signal_program;
194
195 #define SET_SIGS(nsigs,sigs,flags) \
196 do { \
197 int signum = (nsigs); \
198 while (signum-- > 0) \
199 if ((sigs)[signum]) \
200 (flags)[signum] = 1; \
201 } while (0)
202
203 #define UNSET_SIGS(nsigs,sigs,flags) \
204 do { \
205 int signum = (nsigs); \
206 while (signum-- > 0) \
207 if ((sigs)[signum]) \
208 (flags)[signum] = 0; \
209 } while (0)
210
211 /* Value to pass to target_resume() to cause all threads to resume */
212
213 #define RESUME_ALL minus_one_ptid
214
215 /* Command list pointer for the "stop" placeholder. */
216
217 static struct cmd_list_element *stop_command;
218
219 /* Function inferior was in as of last step command. */
220
221 static struct symbol *step_start_function;
222
223 /* Nonzero if we want to give control to the user when we're notified
224 of shared library events by the dynamic linker. */
225 static int stop_on_solib_events;
226 static void
227 show_stop_on_solib_events (struct ui_file *file, int from_tty,
228 struct cmd_list_element *c, const char *value)
229 {
230 fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
231 value);
232 }
233
234 /* Nonzero means expecting a trace trap
235 and should stop the inferior and return silently when it happens. */
236
237 int stop_after_trap;
238
239 /* Save register contents here when executing a "finish" command or are
240 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
241 Thus this contains the return value from the called function (assuming
242 values are returned in a register). */
243
244 struct regcache *stop_registers;
245
246 /* Nonzero after stop if current stack frame should be printed. */
247
248 static int stop_print_frame;
249
250 /* This is a cached copy of the pid/waitstatus of the last event
251 returned by target_wait()/deprecated_target_wait_hook(). This
252 information is returned by get_last_target_status(). */
253 static ptid_t target_last_wait_ptid;
254 static struct target_waitstatus target_last_waitstatus;
255
256 static void context_switch (ptid_t ptid);
257
258 void init_thread_stepping_state (struct thread_info *tss);
259
260 void init_infwait_state (void);
261
262 static const char follow_fork_mode_child[] = "child";
263 static const char follow_fork_mode_parent[] = "parent";
264
265 static const char *follow_fork_mode_kind_names[] = {
266 follow_fork_mode_child,
267 follow_fork_mode_parent,
268 NULL
269 };
270
271 static const char *follow_fork_mode_string = follow_fork_mode_parent;
272 static void
273 show_follow_fork_mode_string (struct ui_file *file, int from_tty,
274 struct cmd_list_element *c, const char *value)
275 {
276 fprintf_filtered (file, _("\
277 Debugger response to a program call of fork or vfork is \"%s\".\n"),
278 value);
279 }
280 \f
281
282 /* Tell the target to follow the fork we're stopped at. Returns true
283 if the inferior should be resumed; false, if the target for some
284 reason decided it's best not to resume. */
285
286 static int
287 follow_fork (void)
288 {
289 int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
290 int should_resume = 1;
291 struct thread_info *tp;
292
293 /* Copy user stepping state to the new inferior thread. FIXME: the
294 followed fork child thread should have a copy of most of the
295 parent thread structure's run control related fields, not just these.
296 Initialized to avoid "may be used uninitialized" warnings from gcc. */
297 struct breakpoint *step_resume_breakpoint = NULL;
298 CORE_ADDR step_range_start = 0;
299 CORE_ADDR step_range_end = 0;
300 struct frame_id step_frame_id = { 0 };
301
302 if (!non_stop)
303 {
304 ptid_t wait_ptid;
305 struct target_waitstatus wait_status;
306
307 /* Get the last target status returned by target_wait(). */
308 get_last_target_status (&wait_ptid, &wait_status);
309
310 /* If not stopped at a fork event, then there's nothing else to
311 do. */
312 if (wait_status.kind != TARGET_WAITKIND_FORKED
313 && wait_status.kind != TARGET_WAITKIND_VFORKED)
314 return 1;
315
316 /* Check if we switched over from WAIT_PTID, since the event was
317 reported. */
318 if (!ptid_equal (wait_ptid, minus_one_ptid)
319 && !ptid_equal (inferior_ptid, wait_ptid))
320 {
321 /* We did. Switch back to WAIT_PTID thread, to tell the
322 target to follow it (in either direction). We'll
323 afterwards refuse to resume, and inform the user what
324 happened. */
325 switch_to_thread (wait_ptid);
326 should_resume = 0;
327 }
328 }
329
330 tp = inferior_thread ();
331
332 /* If there were any forks/vforks that were caught and are now to be
333 followed, then do so now. */
334 switch (tp->pending_follow.kind)
335 {
336 case TARGET_WAITKIND_FORKED:
337 case TARGET_WAITKIND_VFORKED:
338 {
339 ptid_t parent, child;
340
341 /* If the user did a next/step, etc, over a fork call,
342 preserve the stepping state in the fork child. */
343 if (follow_child && should_resume)
344 {
345 step_resume_breakpoint
346 = clone_momentary_breakpoint (tp->step_resume_breakpoint);
347 step_range_start = tp->step_range_start;
348 step_range_end = tp->step_range_end;
349 step_frame_id = tp->step_frame_id;
350
351 /* For now, delete the parent's sr breakpoint, otherwise,
352 parent/child sr breakpoints are considered duplicates,
353 and the child version will not be installed. Remove
354 this when the breakpoints module becomes aware of
355 inferiors and address spaces. */
356 delete_step_resume_breakpoint (tp);
357 tp->step_range_start = 0;
358 tp->step_range_end = 0;
359 tp->step_frame_id = null_frame_id;
360 }
361
362 parent = inferior_ptid;
363 child = tp->pending_follow.value.related_pid;
364
365 /* Tell the target to do whatever is necessary to follow
366 either parent or child. */
367 if (target_follow_fork (follow_child))
368 {
369 /* Target refused to follow, or there's some other reason
370 we shouldn't resume. */
371 should_resume = 0;
372 }
373 else
374 {
375 /* This pending follow fork event is now handled, one way
376 or another. The previous selected thread may be gone
377 from the lists by now, but if it is still around, need
378 to clear the pending follow request. */
379 tp = find_thread_ptid (parent);
380 if (tp)
381 tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;
382
383 /* This makes sure we don't try to apply the "Switched
384 over from WAIT_PID" logic above. */
385 nullify_last_target_wait_ptid ();
386
387 /* If we followed the child, switch to it... */
388 if (follow_child)
389 {
390 switch_to_thread (child);
391
392 /* ... and preserve the stepping state, in case the
393 user was stepping over the fork call. */
394 if (should_resume)
395 {
396 tp = inferior_thread ();
397 tp->step_resume_breakpoint = step_resume_breakpoint;
398 tp->step_range_start = step_range_start;
399 tp->step_range_end = step_range_end;
400 tp->step_frame_id = step_frame_id;
401 }
402 else
403 {
404 /* If we get here, it was because we're trying to
405 resume from a fork catchpoint, but, the user
406 has switched threads away from the thread that
407 forked. In that case, the resume command
408 issued is most likely not applicable to the
409 child, so just warn, and refuse to resume. */
410 warning (_("\
411 Not resuming: switched threads before following fork child.\n"));
412 }
413
414 /* Reset breakpoints in the child as appropriate. */
415 follow_inferior_reset_breakpoints ();
416 }
417 else
418 switch_to_thread (parent);
419 }
420 }
421 break;
422 case TARGET_WAITKIND_SPURIOUS:
423 /* Nothing to follow. */
424 break;
425 default:
426 internal_error (__FILE__, __LINE__,
427 "Unexpected pending_follow.kind %d\n",
428 tp->pending_follow.kind);
429 break;
430 }
431
432 return should_resume;
433 }
434
435 void
436 follow_inferior_reset_breakpoints (void)
437 {
438 struct thread_info *tp = inferior_thread ();
439
440 /* Was there a step_resume breakpoint? (There was if the user
441 did a "next" at the fork() call.) If so, explicitly reset its
442 thread number.
443
444 step_resumes are a form of bp that are made to be per-thread.
445 Since we created the step_resume bp when the parent process
446 was being debugged, and now are switching to the child process,
447 from the breakpoint package's viewpoint, that's a switch of
448 "threads". We must update the bp's notion of which thread
449 it is for, or it'll be ignored when it triggers. */
450
451 if (tp->step_resume_breakpoint)
452 breakpoint_re_set_thread (tp->step_resume_breakpoint);
453
454 /* Reinsert all breakpoints in the child. The user may have set
455 breakpoints after catching the fork, in which case those
456 were never set in the child, but only in the parent. This makes
457 sure the inserted breakpoints match the breakpoint list. */
458
459 breakpoint_re_set ();
460 insert_breakpoints ();
461 }
462
463 /* EXECD_PATHNAME is assumed to be non-NULL. */
464
465 static void
466 follow_exec (ptid_t pid, char *execd_pathname)
467 {
468 struct target_ops *tgt;
469 struct thread_info *th = inferior_thread ();
470
471 /* This is an exec event that we actually wish to pay attention to.
472 Refresh our symbol table to the newly exec'd program, remove any
473 momentary bp's, etc.
474
475 If there are breakpoints, they aren't really inserted now,
476 since the exec() transformed our inferior into a fresh set
477 of instructions.
478
479 We want to preserve symbolic breakpoints on the list, since
480 we have hopes that they can be reset after the new a.out's
481 symbol table is read.
482
483 However, any "raw" breakpoints must be removed from the list
484 (e.g., the solib bp's), since their address is probably invalid
485 now.
486
487 And, we DON'T want to call delete_breakpoints() here, since
488 that may write the bp's "shadow contents" (the instruction
489 value that was overwritten witha TRAP instruction). Since
490 we now have a new a.out, those shadow contents aren't valid. */
491 update_breakpoints_after_exec ();
492
493 /* If there was one, it's gone now. We cannot truly step-to-next
494 statement through an exec(). */
495 th->step_resume_breakpoint = NULL;
496 th->step_range_start = 0;
497 th->step_range_end = 0;
498
499 /* The target reports the exec event to the main thread, even if
500 some other thread does the exec, and even if the main thread was
501 already stopped --- if debugging in non-stop mode, it's possible
502 the user had the main thread held stopped in the previous image
503 --- release it now. This is the same behavior as step-over-exec
504 with scheduler-locking on in all-stop mode. */
505 th->stop_requested = 0;
506
507 /* What is this a.out's name? */
508 printf_unfiltered (_("Executing new program: %s\n"), execd_pathname);
509
510 /* We've followed the inferior through an exec. Therefore, the
511 inferior has essentially been killed & reborn. */
512
513 gdb_flush (gdb_stdout);
514
515 breakpoint_init_inferior (inf_execd);
516
517 if (gdb_sysroot && *gdb_sysroot)
518 {
519 char *name = alloca (strlen (gdb_sysroot)
520 + strlen (execd_pathname)
521 + 1);
522 strcpy (name, gdb_sysroot);
523 strcat (name, execd_pathname);
524 execd_pathname = name;
525 }
526
527 /* That a.out is now the one to use. */
528 exec_file_attach (execd_pathname, 0);
529
530 /* Reset the shared library package. This ensures that we get a
531 shlib event when the child reaches "_start", at which point the
532 dld will have had a chance to initialize the child. */
533 /* Also, loading a symbol file below may trigger symbol lookups, and
534 we don't want those to be satisfied by the libraries of the
535 previous incarnation of this process. */
536 no_shared_libraries (NULL, 0);
537
538 /* Load the main file's symbols. */
539 symbol_file_add_main (execd_pathname, 0);
540
541 #ifdef SOLIB_CREATE_INFERIOR_HOOK
542 SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
543 #else
544 solib_create_inferior_hook ();
545 #endif
546
547 /* Reinsert all breakpoints. (Those which were symbolic have
548 been reset to the proper address in the new a.out, thanks
549 to symbol_file_command...) */
550 insert_breakpoints ();
551
552 /* The next resume of this inferior should bring it to the shlib
553 startup breakpoints. (If the user had also set bp's on
554 "main" from the old (parent) process, then they'll auto-
555 matically get reset there in the new process.) */
556 }
557
558 /* Non-zero if we just simulating a single-step. This is needed
559 because we cannot remove the breakpoints in the inferior process
560 until after the `wait' in `wait_for_inferior'. */
561 static int singlestep_breakpoints_inserted_p = 0;
562
563 /* The thread we inserted single-step breakpoints for. */
564 static ptid_t singlestep_ptid;
565
566 /* PC when we started this single-step. */
567 static CORE_ADDR singlestep_pc;
568
569 /* If another thread hit the singlestep breakpoint, we save the original
570 thread here so that we can resume single-stepping it later. */
571 static ptid_t saved_singlestep_ptid;
572 static int stepping_past_singlestep_breakpoint;
573
574 /* If not equal to null_ptid, this means that after stepping over breakpoint
575 is finished, we need to switch to deferred_step_ptid, and step it.
576
577 The use case is when one thread has hit a breakpoint, and then the user
578 has switched to another thread and issued 'step'. We need to step over
579 breakpoint in the thread which hit the breakpoint, but then continue
580 stepping the thread user has selected. */
581 static ptid_t deferred_step_ptid;
582 \f
583 /* Displaced stepping. */
584
585 /* In non-stop debugging mode, we must take special care to manage
586 breakpoints properly; in particular, the traditional strategy for
587 stepping a thread past a breakpoint it has hit is unsuitable.
588 'Displaced stepping' is a tactic for stepping one thread past a
589 breakpoint it has hit while ensuring that other threads running
590 concurrently will hit the breakpoint as they should.
591
592 The traditional way to step a thread T off a breakpoint in a
593 multi-threaded program in all-stop mode is as follows:
594
595 a0) Initially, all threads are stopped, and breakpoints are not
596 inserted.
597 a1) We single-step T, leaving breakpoints uninserted.
598 a2) We insert breakpoints, and resume all threads.
599
600 In non-stop debugging, however, this strategy is unsuitable: we
601 don't want to have to stop all threads in the system in order to
602 continue or step T past a breakpoint. Instead, we use displaced
603 stepping:
604
605 n0) Initially, T is stopped, other threads are running, and
606 breakpoints are inserted.
607 n1) We copy the instruction "under" the breakpoint to a separate
608 location, outside the main code stream, making any adjustments
609 to the instruction, register, and memory state as directed by
610 T's architecture.
611 n2) We single-step T over the instruction at its new location.
612 n3) We adjust the resulting register and memory state as directed
613 by T's architecture. This includes resetting T's PC to point
614 back into the main instruction stream.
615 n4) We resume T.
616
617 This approach depends on the following gdbarch methods:
618
619 - gdbarch_max_insn_length and gdbarch_displaced_step_location
620 indicate where to copy the instruction, and how much space must
621 be reserved there. We use these in step n1.
622
623 - gdbarch_displaced_step_copy_insn copies a instruction to a new
624 address, and makes any necessary adjustments to the instruction,
625 register contents, and memory. We use this in step n1.
626
627 - gdbarch_displaced_step_fixup adjusts registers and memory after
628 we have successfuly single-stepped the instruction, to yield the
629 same effect the instruction would have had if we had executed it
630 at its original address. We use this in step n3.
631
632 - gdbarch_displaced_step_free_closure provides cleanup.
633
634 The gdbarch_displaced_step_copy_insn and
635 gdbarch_displaced_step_fixup functions must be written so that
636 copying an instruction with gdbarch_displaced_step_copy_insn,
637 single-stepping across the copied instruction, and then applying
638 gdbarch_displaced_insn_fixup should have the same effects on the
639 thread's memory and registers as stepping the instruction in place
640 would have. Exactly which responsibilities fall to the copy and
641 which fall to the fixup is up to the author of those functions.
642
643 See the comments in gdbarch.sh for details.
644
645 Note that displaced stepping and software single-step cannot
646 currently be used in combination, although with some care I think
647 they could be made to. Software single-step works by placing
648 breakpoints on all possible subsequent instructions; if the
649 displaced instruction is a PC-relative jump, those breakpoints
650 could fall in very strange places --- on pages that aren't
651 executable, or at addresses that are not proper instruction
652 boundaries. (We do generally let other threads run while we wait
653 to hit the software single-step breakpoint, and they might
654 encounter such a corrupted instruction.) One way to work around
655 this would be to have gdbarch_displaced_step_copy_insn fully
656 simulate the effect of PC-relative instructions (and return NULL)
657 on architectures that use software single-stepping.
658
659 In non-stop mode, we can have independent and simultaneous step
660 requests, so more than one thread may need to simultaneously step
661 over a breakpoint. The current implementation assumes there is
662 only one scratch space per process. In this case, we have to
663 serialize access to the scratch space. If thread A wants to step
664 over a breakpoint, but we are currently waiting for some other
665 thread to complete a displaced step, we leave thread A stopped and
666 place it in the displaced_step_request_queue. Whenever a displaced
667 step finishes, we pick the next thread in the queue and start a new
668 displaced step operation on it. See displaced_step_prepare and
669 displaced_step_fixup for details. */
670
671 /* If this is not null_ptid, this is the thread carrying out a
672 displaced single-step. This thread's state will require fixing up
673 once it has completed its step. */
674 static ptid_t displaced_step_ptid;
675
676 struct displaced_step_request
677 {
678 ptid_t ptid;
679 struct displaced_step_request *next;
680 };
681
682 /* A queue of pending displaced stepping requests. */
683 struct displaced_step_request *displaced_step_request_queue;
684
685 /* The architecture the thread had when we stepped it. */
686 static struct gdbarch *displaced_step_gdbarch;
687
688 /* The closure provided gdbarch_displaced_step_copy_insn, to be used
689 for post-step cleanup. */
690 static struct displaced_step_closure *displaced_step_closure;
691
692 /* The address of the original instruction, and the copy we made. */
693 static CORE_ADDR displaced_step_original, displaced_step_copy;
694
695 /* Saved contents of copy area. */
696 static gdb_byte *displaced_step_saved_copy;
697
698 /* Enum strings for "set|show displaced-stepping". */
699
700 static const char can_use_displaced_stepping_auto[] = "auto";
701 static const char can_use_displaced_stepping_on[] = "on";
702 static const char can_use_displaced_stepping_off[] = "off";
703 static const char *can_use_displaced_stepping_enum[] =
704 {
705 can_use_displaced_stepping_auto,
706 can_use_displaced_stepping_on,
707 can_use_displaced_stepping_off,
708 NULL,
709 };
710
711 /* If ON, and the architecture supports it, GDB will use displaced
712 stepping to step over breakpoints. If OFF, or if the architecture
713 doesn't support it, GDB will instead use the traditional
714 hold-and-step approach. If AUTO (which is the default), GDB will
715 decide which technique to use to step over breakpoints depending on
716 which of all-stop or non-stop mode is active --- displaced stepping
717 in non-stop mode; hold-and-step in all-stop mode. */
718
719 static const char *can_use_displaced_stepping =
720 can_use_displaced_stepping_auto;
721
722 static void
723 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
724 struct cmd_list_element *c,
725 const char *value)
726 {
727 if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
728 fprintf_filtered (file, _("\
729 Debugger's willingness to use displaced stepping to step over \
730 breakpoints is %s (currently %s).\n"),
731 value, non_stop ? "on" : "off");
732 else
733 fprintf_filtered (file, _("\
734 Debugger's willingness to use displaced stepping to step over \
735 breakpoints is %s.\n"), value);
736 }
737
738 /* Return non-zero if displaced stepping can/should be used to step
739 over breakpoints. */
740
741 static int
742 use_displaced_stepping (struct gdbarch *gdbarch)
743 {
744 return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
745 && non_stop)
746 || can_use_displaced_stepping == can_use_displaced_stepping_on)
747 && gdbarch_displaced_step_copy_insn_p (gdbarch)
748 && !RECORD_IS_USED);
749 }
750
751 /* Clean out any stray displaced stepping state. */
752 static void
753 displaced_step_clear (void)
754 {
755 /* Indicate that there is no cleanup pending. */
756 displaced_step_ptid = null_ptid;
757
758 if (displaced_step_closure)
759 {
760 gdbarch_displaced_step_free_closure (displaced_step_gdbarch,
761 displaced_step_closure);
762 displaced_step_closure = NULL;
763 }
764 }
765
766 static void
767 displaced_step_clear_cleanup (void *ignore)
768 {
769 displaced_step_clear ();
770 }
771
772 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
773 void
774 displaced_step_dump_bytes (struct ui_file *file,
775 const gdb_byte *buf,
776 size_t len)
777 {
778 int i;
779
780 for (i = 0; i < len; i++)
781 fprintf_unfiltered (file, "%02x ", buf[i]);
782 fputs_unfiltered ("\n", file);
783 }
784
785 /* Prepare to single-step, using displaced stepping.
786
787 Note that we cannot use displaced stepping when we have a signal to
788 deliver. If we have a signal to deliver and an instruction to step
789 over, then after the step, there will be no indication from the
790 target whether the thread entered a signal handler or ignored the
791 signal and stepped over the instruction successfully --- both cases
792 result in a simple SIGTRAP. In the first case we mustn't do a
793 fixup, and in the second case we must --- but we can't tell which.
794 Comments in the code for 'random signals' in handle_inferior_event
795 explain how we handle this case instead.
796
797 Returns 1 if preparing was successful -- this thread is going to be
798 stepped now; or 0 if displaced stepping this thread got queued. */
799 static int
800 displaced_step_prepare (ptid_t ptid)
801 {
802 struct cleanup *old_cleanups, *ignore_cleanups;
803 struct regcache *regcache = get_thread_regcache (ptid);
804 struct gdbarch *gdbarch = get_regcache_arch (regcache);
805 CORE_ADDR original, copy;
806 ULONGEST len;
807 struct displaced_step_closure *closure;
808
809 /* We should never reach this function if the architecture does not
810 support displaced stepping. */
811 gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));
812
813 /* For the first cut, we're displaced stepping one thread at a
814 time. */
815
816 if (!ptid_equal (displaced_step_ptid, null_ptid))
817 {
818 /* Already waiting for a displaced step to finish. Defer this
819 request and place in queue. */
820 struct displaced_step_request *req, *new_req;
821
822 if (debug_displaced)
823 fprintf_unfiltered (gdb_stdlog,
824 "displaced: defering step of %s\n",
825 target_pid_to_str (ptid));
826
827 new_req = xmalloc (sizeof (*new_req));
828 new_req->ptid = ptid;
829 new_req->next = NULL;
830
831 if (displaced_step_request_queue)
832 {
833 for (req = displaced_step_request_queue;
834 req && req->next;
835 req = req->next)
836 ;
837 req->next = new_req;
838 }
839 else
840 displaced_step_request_queue = new_req;
841
842 return 0;
843 }
844 else
845 {
846 if (debug_displaced)
847 fprintf_unfiltered (gdb_stdlog,
848 "displaced: stepping %s now\n",
849 target_pid_to_str (ptid));
850 }
851
852 displaced_step_clear ();
853
854 old_cleanups = save_inferior_ptid ();
855 inferior_ptid = ptid;
856
857 original = regcache_read_pc (regcache);
858
859 copy = gdbarch_displaced_step_location (gdbarch);
860 len = gdbarch_max_insn_length (gdbarch);
861
862 /* Save the original contents of the copy area. */
863 displaced_step_saved_copy = xmalloc (len);
864 ignore_cleanups = make_cleanup (free_current_contents,
865 &displaced_step_saved_copy);
866 read_memory (copy, displaced_step_saved_copy, len);
867 if (debug_displaced)
868 {
869 fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
870 paddress (gdbarch, copy));
871 displaced_step_dump_bytes (gdb_stdlog, displaced_step_saved_copy, len);
872 };
873
874 closure = gdbarch_displaced_step_copy_insn (gdbarch,
875 original, copy, regcache);
876
877 /* We don't support the fully-simulated case at present. */
878 gdb_assert (closure);
879
880 /* Save the information we need to fix things up if the step
881 succeeds. */
882 displaced_step_ptid = ptid;
883 displaced_step_gdbarch = gdbarch;
884 displaced_step_closure = closure;
885 displaced_step_original = original;
886 displaced_step_copy = copy;
887
888 make_cleanup (displaced_step_clear_cleanup, 0);
889
890 /* Resume execution at the copy. */
891 regcache_write_pc (regcache, copy);
892
893 discard_cleanups (ignore_cleanups);
894
895 do_cleanups (old_cleanups);
896
897 if (debug_displaced)
898 fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
899 paddress (gdbarch, copy));
900
901 return 1;
902 }
903
904 static void
905 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
906 {
907 struct cleanup *ptid_cleanup = save_inferior_ptid ();
908 inferior_ptid = ptid;
909 write_memory (memaddr, myaddr, len);
910 do_cleanups (ptid_cleanup);
911 }
912
913 static void
914 displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
915 {
916 struct cleanup *old_cleanups;
917
918 /* Was this event for the pid we displaced? */
919 if (ptid_equal (displaced_step_ptid, null_ptid)
920 || ! ptid_equal (displaced_step_ptid, event_ptid))
921 return;
922
923 old_cleanups = make_cleanup (displaced_step_clear_cleanup, 0);
924
925 /* Restore the contents of the copy area. */
926 {
927 ULONGEST len = gdbarch_max_insn_length (displaced_step_gdbarch);
928 write_memory_ptid (displaced_step_ptid, displaced_step_copy,
929 displaced_step_saved_copy, len);
930 if (debug_displaced)
931 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
932 paddress (displaced_step_gdbarch,
933 displaced_step_copy));
934 }
935
936 /* Did the instruction complete successfully? */
937 if (signal == TARGET_SIGNAL_TRAP)
938 {
939 /* Fix up the resulting state. */
940 gdbarch_displaced_step_fixup (displaced_step_gdbarch,
941 displaced_step_closure,
942 displaced_step_original,
943 displaced_step_copy,
944 get_thread_regcache (displaced_step_ptid));
945 }
946 else
947 {
948 /* Since the instruction didn't complete, all we can do is
949 relocate the PC. */
950 struct regcache *regcache = get_thread_regcache (event_ptid);
951 CORE_ADDR pc = regcache_read_pc (regcache);
952 pc = displaced_step_original + (pc - displaced_step_copy);
953 regcache_write_pc (regcache, pc);
954 }
955
956 do_cleanups (old_cleanups);
957
958 displaced_step_ptid = null_ptid;
959
960 /* Are there any pending displaced stepping requests? If so, run
961 one now. */
962 while (displaced_step_request_queue)
963 {
964 struct displaced_step_request *head;
965 ptid_t ptid;
966 struct regcache *regcache;
967 struct gdbarch *gdbarch;
968 CORE_ADDR actual_pc;
969
970 head = displaced_step_request_queue;
971 ptid = head->ptid;
972 displaced_step_request_queue = head->next;
973 xfree (head);
974
975 context_switch (ptid);
976
977 regcache = get_thread_regcache (ptid);
978 actual_pc = regcache_read_pc (regcache);
979
980 if (breakpoint_here_p (actual_pc))
981 {
982 if (debug_displaced)
983 fprintf_unfiltered (gdb_stdlog,
984 "displaced: stepping queued %s now\n",
985 target_pid_to_str (ptid));
986
987 displaced_step_prepare (ptid);
988
989 gdbarch = get_regcache_arch (regcache);
990
991 if (debug_displaced)
992 {
993 CORE_ADDR actual_pc = regcache_read_pc (regcache);
994 gdb_byte buf[4];
995
996 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
997 paddress (gdbarch, actual_pc));
998 read_memory (actual_pc, buf, sizeof (buf));
999 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1000 }
1001
1002 if (gdbarch_software_single_step_p (gdbarch))
1003 target_resume (ptid, 0, TARGET_SIGNAL_0);
1004 else
1005 target_resume (ptid, 1, TARGET_SIGNAL_0);
1006
1007 /* Done, we're stepping a thread. */
1008 break;
1009 }
1010 else
1011 {
1012 int step;
1013 struct thread_info *tp = inferior_thread ();
1014
1015 /* The breakpoint we were sitting under has since been
1016 removed. */
1017 tp->trap_expected = 0;
1018
1019 /* Go back to what we were trying to do. */
1020 step = currently_stepping (tp);
1021
1022 if (debug_displaced)
1023 fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
1024 target_pid_to_str (tp->ptid), step);
1025
1026 target_resume (ptid, step, TARGET_SIGNAL_0);
1027 tp->stop_signal = TARGET_SIGNAL_0;
1028
1029 /* This request was discarded. See if there's any other
1030 thread waiting for its turn. */
1031 }
1032 }
1033 }
1034
1035 /* Update global variables holding ptids to hold NEW_PTID if they were
1036 holding OLD_PTID. */
1037 static void
1038 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1039 {
1040 struct displaced_step_request *it;
1041
1042 if (ptid_equal (inferior_ptid, old_ptid))
1043 inferior_ptid = new_ptid;
1044
1045 if (ptid_equal (singlestep_ptid, old_ptid))
1046 singlestep_ptid = new_ptid;
1047
1048 if (ptid_equal (displaced_step_ptid, old_ptid))
1049 displaced_step_ptid = new_ptid;
1050
1051 if (ptid_equal (deferred_step_ptid, old_ptid))
1052 deferred_step_ptid = new_ptid;
1053
1054 for (it = displaced_step_request_queue; it; it = it->next)
1055 if (ptid_equal (it->ptid, old_ptid))
1056 it->ptid = new_ptid;
1057 }
1058
1059 \f
1060 /* Resuming. */
1061
1062 /* Things to clean up if we QUIT out of resume (). */
1063 static void
1064 resume_cleanups (void *ignore)
1065 {
1066 normal_stop ();
1067 }
1068
1069 static const char schedlock_off[] = "off";
1070 static const char schedlock_on[] = "on";
1071 static const char schedlock_step[] = "step";
1072 static const char *scheduler_enums[] = {
1073 schedlock_off,
1074 schedlock_on,
1075 schedlock_step,
1076 NULL
1077 };
1078 static const char *scheduler_mode = schedlock_off;
1079 static void
1080 show_scheduler_mode (struct ui_file *file, int from_tty,
1081 struct cmd_list_element *c, const char *value)
1082 {
1083 fprintf_filtered (file, _("\
1084 Mode for locking scheduler during execution is \"%s\".\n"),
1085 value);
1086 }
1087
1088 static void
1089 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1090 {
1091 if (!target_can_lock_scheduler)
1092 {
1093 scheduler_mode = schedlock_off;
1094 error (_("Target '%s' cannot support this command."), target_shortname);
1095 }
1096 }
1097
1098 /* True if execution commands resume all threads of all processes by
1099 default; otherwise, resume only threads of the current inferior
1100 process. */
1101 int sched_multi = 0;
1102
1103 /* Try to setup for software single stepping over the specified location.
1104 Return 1 if target_resume() should use hardware single step.
1105
1106 GDBARCH the current gdbarch.
1107 PC the location to step over. */
1108
1109 static int
1110 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1111 {
1112 int hw_step = 1;
1113
1114 if (gdbarch_software_single_step_p (gdbarch))
1115 {
1116 if (use_displaced_stepping (gdbarch))
1117 hw_step = 0;
1118 else if (gdbarch_software_single_step (gdbarch, get_current_frame ()))
1119 {
1120 hw_step = 0;
1121 /* Do not pull these breakpoints until after a `wait' in
1122 `wait_for_inferior' */
1123 singlestep_breakpoints_inserted_p = 1;
1124 singlestep_ptid = inferior_ptid;
1125 singlestep_pc = pc;
1126 }
1127 }
1128 return hw_step;
1129 }
1130
1131 /* Resume the inferior, but allow a QUIT. This is useful if the user
1132 wants to interrupt some lengthy single-stepping operation
1133 (for child processes, the SIGINT goes to the inferior, and so
1134 we get a SIGINT random_signal, but for remote debugging and perhaps
1135 other targets, that's not true).
1136
1137 STEP nonzero if we should step (zero to continue instead).
1138 SIG is the signal to give the inferior (zero for none). */
1139 void
1140 resume (int step, enum target_signal sig)
1141 {
1142 int should_resume = 1;
1143 struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1144 struct regcache *regcache = get_current_regcache ();
1145 struct gdbarch *gdbarch = get_regcache_arch (regcache);
1146 struct thread_info *tp = inferior_thread ();
1147 CORE_ADDR pc = regcache_read_pc (regcache);
1148
1149 QUIT;
1150
1151 if (debug_infrun)
1152 fprintf_unfiltered (gdb_stdlog,
1153 "infrun: resume (step=%d, signal=%d), "
1154 "trap_expected=%d\n",
1155 step, sig, tp->trap_expected);
1156
1157 /* Some targets (e.g. Solaris x86) have a kernel bug when stepping
1158 over an instruction that causes a page fault without triggering
1159 a hardware watchpoint. The kernel properly notices that it shouldn't
1160 stop, because the hardware watchpoint is not triggered, but it forgets
1161 the step request and continues the program normally.
1162 Work around the problem by removing hardware watchpoints if a step is
1163 requested, GDB will check for a hardware watchpoint trigger after the
1164 step anyway. */
1165 if (CANNOT_STEP_HW_WATCHPOINTS && step)
1166 remove_hw_watchpoints ();
1167
1168
1169 /* Normally, by the time we reach `resume', the breakpoints are either
1170 removed or inserted, as appropriate. The exception is if we're sitting
1171 at a permanent breakpoint; we need to step over it, but permanent
1172 breakpoints can't be removed. So we have to test for it here. */
1173 if (breakpoint_here_p (pc) == permanent_breakpoint_here)
1174 {
1175 if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1176 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1177 else
1178 error (_("\
1179 The program is stopped at a permanent breakpoint, but GDB does not know\n\
1180 how to step past a permanent breakpoint on this architecture. Try using\n\
1181 a command like `return' or `jump' to continue execution."));
1182 }
1183
1184 /* If enabled, step over breakpoints by executing a copy of the
1185 instruction at a different address.
1186
1187 We can't use displaced stepping when we have a signal to deliver;
1188 the comments for displaced_step_prepare explain why. The
1189 comments in the handle_inferior event for dealing with 'random
1190 signals' explain what we do instead. */
1191 if (use_displaced_stepping (gdbarch)
1192 && (tp->trap_expected
1193 || (step && gdbarch_software_single_step_p (gdbarch)))
1194 && sig == TARGET_SIGNAL_0)
1195 {
1196 if (!displaced_step_prepare (inferior_ptid))
1197 {
1198 /* Got placed in displaced stepping queue. Will be resumed
1199 later when all the currently queued displaced stepping
1200 requests finish. The thread is not executing at this point,
1201 and the call to set_executing will be made later. But we
1202 need to call set_running here, since from frontend point of view,
1203 the thread is running. */
1204 set_running (inferior_ptid, 1);
1205 discard_cleanups (old_cleanups);
1206 return;
1207 }
1208 }
1209
1210 /* Do we need to do it the hard way, w/temp breakpoints? */
1211 if (step)
1212 step = maybe_software_singlestep (gdbarch, pc);
1213
1214 if (should_resume)
1215 {
1216 ptid_t resume_ptid;
1217
1218 /* If STEP is set, it's a request to use hardware stepping
1219 facilities. But in that case, we should never
1220 use singlestep breakpoint. */
1221 gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1222
1223 /* Decide the set of threads to ask the target to resume. Start
1224 by assuming everything will be resumed, than narrow the set
1225 by applying increasingly restricting conditions. */
1226
1227 /* By default, resume all threads of all processes. */
1228 resume_ptid = RESUME_ALL;
1229
1230 /* Maybe resume only all threads of the current process. */
1231 if (!sched_multi && target_supports_multi_process ())
1232 {
1233 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1234 }
1235
1236 /* Maybe resume a single thread after all. */
1237 if (singlestep_breakpoints_inserted_p
1238 && stepping_past_singlestep_breakpoint)
1239 {
1240 /* The situation here is as follows. In thread T1 we wanted to
1241 single-step. Lacking hardware single-stepping we've
1242 set breakpoint at the PC of the next instruction -- call it
1243 P. After resuming, we've hit that breakpoint in thread T2.
1244 Now we've removed original breakpoint, inserted breakpoint
1245 at P+1, and try to step to advance T2 past breakpoint.
1246 We need to step only T2, as if T1 is allowed to freely run,
1247 it can run past P, and if other threads are allowed to run,
1248 they can hit breakpoint at P+1, and nested hits of single-step
1249 breakpoints is not something we'd want -- that's complicated
1250 to support, and has no value. */
1251 resume_ptid = inferior_ptid;
1252 }
1253 else if ((step || singlestep_breakpoints_inserted_p)
1254 && tp->trap_expected)
1255 {
1256 /* We're allowing a thread to run past a breakpoint it has
1257 hit, by single-stepping the thread with the breakpoint
1258 removed. In which case, we need to single-step only this
1259 thread, and keep others stopped, as they can miss this
1260 breakpoint if allowed to run.
1261
1262 The current code actually removes all breakpoints when
1263 doing this, not just the one being stepped over, so if we
1264 let other threads run, we can actually miss any
1265 breakpoint, not just the one at PC. */
1266 resume_ptid = inferior_ptid;
1267 }
1268 else if (non_stop)
1269 {
1270 /* With non-stop mode on, threads are always handled
1271 individually. */
1272 resume_ptid = inferior_ptid;
1273 }
1274 else if ((scheduler_mode == schedlock_on)
1275 || (scheduler_mode == schedlock_step
1276 && (step || singlestep_breakpoints_inserted_p)))
1277 {
1278 /* User-settable 'scheduler' mode requires solo thread resume. */
1279 resume_ptid = inferior_ptid;
1280 }
1281
1282 if (gdbarch_cannot_step_breakpoint (gdbarch))
1283 {
1284 /* Most targets can step a breakpoint instruction, thus
1285 executing it normally. But if this one cannot, just
1286 continue and we will hit it anyway. */
1287 if (step && breakpoint_inserted_here_p (pc))
1288 step = 0;
1289 }
1290
1291 if (debug_displaced
1292 && use_displaced_stepping (gdbarch)
1293 && tp->trap_expected)
1294 {
1295 struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1296 struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1297 CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1298 gdb_byte buf[4];
1299
1300 fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1301 paddress (resume_gdbarch, actual_pc));
1302 read_memory (actual_pc, buf, sizeof (buf));
1303 displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1304 }
1305
1306 /* Install inferior's terminal modes. */
1307 target_terminal_inferior ();
1308
1309 /* Avoid confusing the next resume, if the next stop/resume
1310 happens to apply to another thread. */
1311 tp->stop_signal = TARGET_SIGNAL_0;
1312
1313 target_resume (resume_ptid, step, sig);
1314 }
1315
1316 discard_cleanups (old_cleanups);
1317 }
1318 \f
1319 /* Proceeding. */
1320
1321 /* Clear out all variables saying what to do when inferior is continued.
1322 First do this, then set the ones you want, then call `proceed'. */
1323
1324 static void
1325 clear_proceed_status_thread (struct thread_info *tp)
1326 {
1327 if (debug_infrun)
1328 fprintf_unfiltered (gdb_stdlog,
1329 "infrun: clear_proceed_status_thread (%s)\n",
1330 target_pid_to_str (tp->ptid));
1331
1332 tp->trap_expected = 0;
1333 tp->step_range_start = 0;
1334 tp->step_range_end = 0;
1335 tp->step_frame_id = null_frame_id;
1336 tp->step_stack_frame_id = null_frame_id;
1337 tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1338 tp->stop_requested = 0;
1339
1340 tp->stop_step = 0;
1341
1342 tp->proceed_to_finish = 0;
1343
1344 /* Discard any remaining commands or status from previous stop. */
1345 bpstat_clear (&tp->stop_bpstat);
1346 }
1347
1348 static int
1349 clear_proceed_status_callback (struct thread_info *tp, void *data)
1350 {
1351 if (is_exited (tp->ptid))
1352 return 0;
1353
1354 clear_proceed_status_thread (tp);
1355 return 0;
1356 }
1357
1358 void
1359 clear_proceed_status (void)
1360 {
1361 if (!ptid_equal (inferior_ptid, null_ptid))
1362 {
1363 struct inferior *inferior;
1364
1365 if (non_stop)
1366 {
1367 /* If in non-stop mode, only delete the per-thread status
1368 of the current thread. */
1369 clear_proceed_status_thread (inferior_thread ());
1370 }
1371 else
1372 {
1373 /* In all-stop mode, delete the per-thread status of
1374 *all* threads. */
1375 iterate_over_threads (clear_proceed_status_callback, NULL);
1376 }
1377
1378 inferior = current_inferior ();
1379 inferior->stop_soon = NO_STOP_QUIETLY;
1380 }
1381
1382 stop_after_trap = 0;
1383
1384 observer_notify_about_to_proceed ();
1385
1386 if (stop_registers)
1387 {
1388 regcache_xfree (stop_registers);
1389 stop_registers = NULL;
1390 }
1391 }
1392
1393 /* Check the current thread against the thread that reported the most recent
1394 event. If a step-over is required return TRUE and set the current thread
1395 to the old thread. Otherwise return FALSE.
1396
1397 This should be suitable for any targets that support threads. */
1398
1399 static int
1400 prepare_to_proceed (int step)
1401 {
1402 ptid_t wait_ptid;
1403 struct target_waitstatus wait_status;
1404 int schedlock_enabled;
1405
1406 /* With non-stop mode on, threads are always handled individually. */
1407 gdb_assert (! non_stop);
1408
1409 /* Get the last target status returned by target_wait(). */
1410 get_last_target_status (&wait_ptid, &wait_status);
1411
1412 /* Make sure we were stopped at a breakpoint. */
1413 if (wait_status.kind != TARGET_WAITKIND_STOPPED
1414 || wait_status.value.sig != TARGET_SIGNAL_TRAP)
1415 {
1416 return 0;
1417 }
1418
1419 schedlock_enabled = (scheduler_mode == schedlock_on
1420 || (scheduler_mode == schedlock_step
1421 && step));
1422
1423 /* Don't switch over to WAIT_PTID if scheduler locking is on. */
1424 if (schedlock_enabled)
1425 return 0;
1426
1427 /* Don't switch over if we're about to resume some other process
1428 other than WAIT_PTID's, and schedule-multiple is off. */
1429 if (!sched_multi
1430 && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1431 return 0;
1432
1433 /* Switched over from WAIT_PID. */
1434 if (!ptid_equal (wait_ptid, minus_one_ptid)
1435 && !ptid_equal (inferior_ptid, wait_ptid))
1436 {
1437 struct regcache *regcache = get_thread_regcache (wait_ptid);
1438
1439 if (breakpoint_here_p (regcache_read_pc (regcache)))
1440 {
1441 /* If stepping, remember current thread to switch back to. */
1442 if (step)
1443 deferred_step_ptid = inferior_ptid;
1444
1445 /* Switch back to WAIT_PID thread. */
1446 switch_to_thread (wait_ptid);
1447
1448 /* We return 1 to indicate that there is a breakpoint here,
1449 so we need to step over it before continuing to avoid
1450 hitting it straight away. */
1451 return 1;
1452 }
1453 }
1454
1455 return 0;
1456 }
1457
1458 /* Basic routine for continuing the program in various fashions.
1459
1460 ADDR is the address to resume at, or -1 for resume where stopped.
1461 SIGGNAL is the signal to give it, or 0 for none,
1462 or -1 for act according to how it stopped.
1463 STEP is nonzero if should trap after one instruction.
1464 -1 means return after that and print nothing.
1465 You should probably set various step_... variables
1466 before calling here, if you are stepping.
1467
1468 You should call clear_proceed_status before calling proceed. */
1469
1470 void
1471 proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1472 {
1473 struct regcache *regcache;
1474 struct gdbarch *gdbarch;
1475 struct thread_info *tp;
1476 CORE_ADDR pc;
1477 int oneproc = 0;
1478
1479 /* If we're stopped at a fork/vfork, follow the branch set by the
1480 "set follow-fork-mode" command; otherwise, we'll just proceed
1481 resuming the current thread. */
1482 if (!follow_fork ())
1483 {
1484 /* The target for some reason decided not to resume. */
1485 normal_stop ();
1486 return;
1487 }
1488
1489 regcache = get_current_regcache ();
1490 gdbarch = get_regcache_arch (regcache);
1491 pc = regcache_read_pc (regcache);
1492
1493 if (step > 0)
1494 step_start_function = find_pc_function (pc);
1495 if (step < 0)
1496 stop_after_trap = 1;
1497
1498 if (addr == (CORE_ADDR) -1)
1499 {
1500 if (pc == stop_pc && breakpoint_here_p (pc)
1501 && execution_direction != EXEC_REVERSE)
1502 /* There is a breakpoint at the address we will resume at,
1503 step one instruction before inserting breakpoints so that
1504 we do not stop right away (and report a second hit at this
1505 breakpoint).
1506
1507 Note, we don't do this in reverse, because we won't
1508 actually be executing the breakpoint insn anyway.
1509 We'll be (un-)executing the previous instruction. */
1510
1511 oneproc = 1;
1512 else if (gdbarch_single_step_through_delay_p (gdbarch)
1513 && gdbarch_single_step_through_delay (gdbarch,
1514 get_current_frame ()))
1515 /* We stepped onto an instruction that needs to be stepped
1516 again before re-inserting the breakpoint, do so. */
1517 oneproc = 1;
1518 }
1519 else
1520 {
1521 regcache_write_pc (regcache, addr);
1522 }
1523
1524 if (debug_infrun)
1525 fprintf_unfiltered (gdb_stdlog,
1526 "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1527 paddress (gdbarch, addr), siggnal, step);
1528
1529 if (non_stop)
1530 /* In non-stop, each thread is handled individually. The context
1531 must already be set to the right thread here. */
1532 ;
1533 else
1534 {
1535 /* In a multi-threaded task we may select another thread and
1536 then continue or step.
1537
1538 But if the old thread was stopped at a breakpoint, it will
1539 immediately cause another breakpoint stop without any
1540 execution (i.e. it will report a breakpoint hit incorrectly).
1541 So we must step over it first.
1542
1543 prepare_to_proceed checks the current thread against the
1544 thread that reported the most recent event. If a step-over
1545 is required it returns TRUE and sets the current thread to
1546 the old thread. */
1547 if (prepare_to_proceed (step))
1548 oneproc = 1;
1549 }
1550
1551 /* prepare_to_proceed may change the current thread. */
1552 tp = inferior_thread ();
1553
1554 if (oneproc)
1555 {
1556 tp->trap_expected = 1;
1557 /* If displaced stepping is enabled, we can step over the
1558 breakpoint without hitting it, so leave all breakpoints
1559 inserted. Otherwise we need to disable all breakpoints, step
1560 one instruction, and then re-add them when that step is
1561 finished. */
1562 if (!use_displaced_stepping (gdbarch))
1563 remove_breakpoints ();
1564 }
1565
1566 /* We can insert breakpoints if we're not trying to step over one,
1567 or if we are stepping over one but we're using displaced stepping
1568 to do so. */
1569 if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1570 insert_breakpoints ();
1571
1572 if (!non_stop)
1573 {
1574 /* Pass the last stop signal to the thread we're resuming,
1575 irrespective of whether the current thread is the thread that
1576 got the last event or not. This was historically GDB's
1577 behaviour before keeping a stop_signal per thread. */
1578
1579 struct thread_info *last_thread;
1580 ptid_t last_ptid;
1581 struct target_waitstatus last_status;
1582
1583 get_last_target_status (&last_ptid, &last_status);
1584 if (!ptid_equal (inferior_ptid, last_ptid)
1585 && !ptid_equal (last_ptid, null_ptid)
1586 && !ptid_equal (last_ptid, minus_one_ptid))
1587 {
1588 last_thread = find_thread_ptid (last_ptid);
1589 if (last_thread)
1590 {
1591 tp->stop_signal = last_thread->stop_signal;
1592 last_thread->stop_signal = TARGET_SIGNAL_0;
1593 }
1594 }
1595 }
1596
1597 if (siggnal != TARGET_SIGNAL_DEFAULT)
1598 tp->stop_signal = siggnal;
1599 /* If this signal should not be seen by program,
1600 give it zero. Used for debugging signals. */
1601 else if (!signal_program[tp->stop_signal])
1602 tp->stop_signal = TARGET_SIGNAL_0;
1603
1604 annotate_starting ();
1605
1606 /* Make sure that output from GDB appears before output from the
1607 inferior. */
1608 gdb_flush (gdb_stdout);
1609
1610 /* Refresh prev_pc value just prior to resuming. This used to be
1611 done in stop_stepping, however, setting prev_pc there did not handle
1612 scenarios such as inferior function calls or returning from
1613 a function via the return command. In those cases, the prev_pc
1614 value was not set properly for subsequent commands. The prev_pc value
1615 is used to initialize the starting line number in the ecs. With an
1616 invalid value, the gdb next command ends up stopping at the position
1617 represented by the next line table entry past our start position.
1618 On platforms that generate one line table entry per line, this
1619 is not a problem. However, on the ia64, the compiler generates
1620 extraneous line table entries that do not increase the line number.
1621 When we issue the gdb next command on the ia64 after an inferior call
1622 or a return command, we often end up a few instructions forward, still
1623 within the original line we started.
1624
1625 An attempt was made to have init_execution_control_state () refresh
1626 the prev_pc value before calculating the line number. This approach
1627 did not work because on platforms that use ptrace, the pc register
1628 cannot be read unless the inferior is stopped. At that point, we
1629 are not guaranteed the inferior is stopped and so the regcache_read_pc ()
1630 call can fail. Setting the prev_pc value here ensures the value is
1631 updated correctly when the inferior is stopped. */
1632 tp->prev_pc = regcache_read_pc (get_current_regcache ());
1633
1634 /* Fill in with reasonable starting values. */
1635 init_thread_stepping_state (tp);
1636
1637 /* Reset to normal state. */
1638 init_infwait_state ();
1639
1640 /* Resume inferior. */
1641 resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
1642
1643 /* Wait for it to stop (if not standalone)
1644 and in any case decode why it stopped, and act accordingly. */
1645 /* Do this only if we are not using the event loop, or if the target
1646 does not support asynchronous execution. */
1647 if (!target_can_async_p ())
1648 {
1649 wait_for_inferior (0);
1650 normal_stop ();
1651 }
1652 }
1653 \f
1654
1655 /* Start remote-debugging of a machine over a serial link. */
1656
1657 void
1658 start_remote (int from_tty)
1659 {
1660 struct inferior *inferior;
1661 init_wait_for_inferior ();
1662
1663 inferior = current_inferior ();
1664 inferior->stop_soon = STOP_QUIETLY_REMOTE;
1665
1666 /* Always go on waiting for the target, regardless of the mode. */
1667 /* FIXME: cagney/1999-09-23: At present it isn't possible to
1668 indicate to wait_for_inferior that a target should timeout if
1669 nothing is returned (instead of just blocking). Because of this,
1670 targets expecting an immediate response need to, internally, set
1671 things up so that the target_wait() is forced to eventually
1672 timeout. */
1673 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
1674 differentiate to its caller what the state of the target is after
1675 the initial open has been performed. Here we're assuming that
1676 the target has stopped. It should be possible to eventually have
1677 target_open() return to the caller an indication that the target
1678 is currently running and GDB state should be set to the same as
1679 for an async run. */
1680 wait_for_inferior (0);
1681
1682 /* Now that the inferior has stopped, do any bookkeeping like
1683 loading shared libraries. We want to do this before normal_stop,
1684 so that the displayed frame is up to date. */
1685 post_create_inferior (&current_target, from_tty);
1686
1687 normal_stop ();
1688 }
1689
1690 /* Initialize static vars when a new inferior begins. */
1691
1692 void
1693 init_wait_for_inferior (void)
1694 {
1695 /* These are meaningless until the first time through wait_for_inferior. */
1696
1697 breakpoint_init_inferior (inf_starting);
1698
1699 clear_proceed_status ();
1700
1701 stepping_past_singlestep_breakpoint = 0;
1702 deferred_step_ptid = null_ptid;
1703
1704 target_last_wait_ptid = minus_one_ptid;
1705
1706 previous_inferior_ptid = null_ptid;
1707 init_infwait_state ();
1708
1709 displaced_step_clear ();
1710
1711 /* Discard any skipped inlined frames. */
1712 clear_inline_frame_state (minus_one_ptid);
1713 }
1714
1715 \f
1716 /* This enum encodes possible reasons for doing a target_wait, so that
1717 wfi can call target_wait in one place. (Ultimately the call will be
1718 moved out of the infinite loop entirely.) */
1719
1720 enum infwait_states
1721 {
1722 infwait_normal_state,
1723 infwait_thread_hop_state,
1724 infwait_step_watch_state,
1725 infwait_nonstep_watch_state
1726 };
1727
1728 /* Why did the inferior stop? Used to print the appropriate messages
1729 to the interface from within handle_inferior_event(). */
1730 enum inferior_stop_reason
1731 {
1732 /* Step, next, nexti, stepi finished. */
1733 END_STEPPING_RANGE,
1734 /* Inferior terminated by signal. */
1735 SIGNAL_EXITED,
1736 /* Inferior exited. */
1737 EXITED,
1738 /* Inferior received signal, and user asked to be notified. */
1739 SIGNAL_RECEIVED,
1740 /* Reverse execution -- target ran out of history info. */
1741 NO_HISTORY
1742 };
1743
1744 /* The PTID we'll do a target_wait on.*/
1745 ptid_t waiton_ptid;
1746
1747 /* Current inferior wait state. */
1748 enum infwait_states infwait_state;
1749
1750 /* Data to be passed around while handling an event. This data is
1751 discarded between events. */
1752 struct execution_control_state
1753 {
1754 ptid_t ptid;
1755 /* The thread that got the event, if this was a thread event; NULL
1756 otherwise. */
1757 struct thread_info *event_thread;
1758
1759 struct target_waitstatus ws;
1760 int random_signal;
1761 CORE_ADDR stop_func_start;
1762 CORE_ADDR stop_func_end;
1763 char *stop_func_name;
1764 int new_thread_event;
1765 int wait_some_more;
1766 };
1767
1768 static void init_execution_control_state (struct execution_control_state *ecs);
1769
1770 static void handle_inferior_event (struct execution_control_state *ecs);
1771
1772 static void handle_step_into_function (struct gdbarch *gdbarch,
1773 struct execution_control_state *ecs);
1774 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
1775 struct execution_control_state *ecs);
1776 static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
1777 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
1778 static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
1779 struct symtab_and_line sr_sal,
1780 struct frame_id sr_id);
1781 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
1782
1783 static void stop_stepping (struct execution_control_state *ecs);
1784 static void prepare_to_wait (struct execution_control_state *ecs);
1785 static void keep_going (struct execution_control_state *ecs);
1786 static void print_stop_reason (enum inferior_stop_reason stop_reason,
1787 int stop_info);
1788
1789 /* Callback for iterate over threads. If the thread is stopped, but
1790 the user/frontend doesn't know about that yet, go through
1791 normal_stop, as if the thread had just stopped now. ARG points at
1792 a ptid. If PTID is MINUS_ONE_PTID, applies to all threads. If
1793 ptid_is_pid(PTID) is true, applies to all threads of the process
1794 pointed at by PTID. Otherwise, apply only to the thread pointed by
1795 PTID. */
1796
1797 static int
1798 infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
1799 {
1800 ptid_t ptid = * (ptid_t *) arg;
1801
1802 if ((ptid_equal (info->ptid, ptid)
1803 || ptid_equal (minus_one_ptid, ptid)
1804 || (ptid_is_pid (ptid)
1805 && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
1806 && is_running (info->ptid)
1807 && !is_executing (info->ptid))
1808 {
1809 struct cleanup *old_chain;
1810 struct execution_control_state ecss;
1811 struct execution_control_state *ecs = &ecss;
1812
1813 memset (ecs, 0, sizeof (*ecs));
1814
1815 old_chain = make_cleanup_restore_current_thread ();
1816
1817 switch_to_thread (info->ptid);
1818
1819 /* Go through handle_inferior_event/normal_stop, so we always
1820 have consistent output as if the stop event had been
1821 reported. */
1822 ecs->ptid = info->ptid;
1823 ecs->event_thread = find_thread_ptid (info->ptid);
1824 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
1825 ecs->ws.value.sig = TARGET_SIGNAL_0;
1826
1827 handle_inferior_event (ecs);
1828
1829 if (!ecs->wait_some_more)
1830 {
1831 struct thread_info *tp;
1832
1833 normal_stop ();
1834
1835 /* Finish off the continuations. The continations
1836 themselves are responsible for realising the thread
1837 didn't finish what it was supposed to do. */
1838 tp = inferior_thread ();
1839 do_all_intermediate_continuations_thread (tp);
1840 do_all_continuations_thread (tp);
1841 }
1842
1843 do_cleanups (old_chain);
1844 }
1845
1846 return 0;
1847 }
1848
1849 /* This function is attached as a "thread_stop_requested" observer.
1850 Cleanup local state that assumed the PTID was to be resumed, and
1851 report the stop to the frontend. */
1852
1853 static void
1854 infrun_thread_stop_requested (ptid_t ptid)
1855 {
1856 struct displaced_step_request *it, *next, *prev = NULL;
1857
1858 /* PTID was requested to stop. Remove it from the displaced
1859 stepping queue, so we don't try to resume it automatically. */
1860 for (it = displaced_step_request_queue; it; it = next)
1861 {
1862 next = it->next;
1863
1864 if (ptid_equal (it->ptid, ptid)
1865 || ptid_equal (minus_one_ptid, ptid)
1866 || (ptid_is_pid (ptid)
1867 && ptid_get_pid (ptid) == ptid_get_pid (it->ptid)))
1868 {
1869 if (displaced_step_request_queue == it)
1870 displaced_step_request_queue = it->next;
1871 else
1872 prev->next = it->next;
1873
1874 xfree (it);
1875 }
1876 else
1877 prev = it;
1878 }
1879
1880 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
1881 }
1882
1883 static void
1884 infrun_thread_thread_exit (struct thread_info *tp, int silent)
1885 {
1886 if (ptid_equal (target_last_wait_ptid, tp->ptid))
1887 nullify_last_target_wait_ptid ();
1888 }
1889
1890 /* Callback for iterate_over_threads. */
1891
1892 static int
1893 delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
1894 {
1895 if (is_exited (info->ptid))
1896 return 0;
1897
1898 delete_step_resume_breakpoint (info);
1899 return 0;
1900 }
1901
1902 /* In all-stop, delete the step resume breakpoint of any thread that
1903 had one. In non-stop, delete the step resume breakpoint of the
1904 thread that just stopped. */
1905
1906 static void
1907 delete_step_thread_step_resume_breakpoint (void)
1908 {
1909 if (!target_has_execution
1910 || ptid_equal (inferior_ptid, null_ptid))
1911 /* If the inferior has exited, we have already deleted the step
1912 resume breakpoints out of GDB's lists. */
1913 return;
1914
1915 if (non_stop)
1916 {
1917 /* If in non-stop mode, only delete the step-resume or
1918 longjmp-resume breakpoint of the thread that just stopped
1919 stepping. */
1920 struct thread_info *tp = inferior_thread ();
1921 delete_step_resume_breakpoint (tp);
1922 }
1923 else
1924 /* In all-stop mode, delete all step-resume and longjmp-resume
1925 breakpoints of any thread that had them. */
1926 iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
1927 }
1928
1929 /* A cleanup wrapper. */
1930
1931 static void
1932 delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
1933 {
1934 delete_step_thread_step_resume_breakpoint ();
1935 }
1936
1937 /* Pretty print the results of target_wait, for debugging purposes. */
1938
1939 static void
1940 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
1941 const struct target_waitstatus *ws)
1942 {
1943 char *status_string = target_waitstatus_to_string (ws);
1944 struct ui_file *tmp_stream = mem_fileopen ();
1945 char *text;
1946 long len;
1947
1948 /* The text is split over several lines because it was getting too long.
1949 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
1950 output as a unit; we want only one timestamp printed if debug_timestamp
1951 is set. */
1952
1953 fprintf_unfiltered (tmp_stream,
1954 "infrun: target_wait (%d", PIDGET (waiton_ptid));
1955 if (PIDGET (waiton_ptid) != -1)
1956 fprintf_unfiltered (tmp_stream,
1957 " [%s]", target_pid_to_str (waiton_ptid));
1958 fprintf_unfiltered (tmp_stream, ", status) =\n");
1959 fprintf_unfiltered (tmp_stream,
1960 "infrun: %d [%s],\n",
1961 PIDGET (result_ptid), target_pid_to_str (result_ptid));
1962 fprintf_unfiltered (tmp_stream,
1963 "infrun: %s\n",
1964 status_string);
1965
1966 text = ui_file_xstrdup (tmp_stream, &len);
1967
1968 /* This uses %s in part to handle %'s in the text, but also to avoid
1969 a gcc error: the format attribute requires a string literal. */
1970 fprintf_unfiltered (gdb_stdlog, "%s", text);
1971
1972 xfree (status_string);
1973 xfree (text);
1974 ui_file_delete (tmp_stream);
1975 }
1976
1977 /* Wait for control to return from inferior to debugger.
1978
1979 If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
1980 as if they were SIGTRAP signals. This can be useful during
1981 the startup sequence on some targets such as HP/UX, where
1982 we receive an EXEC event instead of the expected SIGTRAP.
1983
1984 If inferior gets a signal, we may decide to start it up again
1985 instead of returning. That is why there is a loop in this function.
1986 When this function actually returns it means the inferior
1987 should be left stopped and GDB should read more commands. */
1988
1989 void
1990 wait_for_inferior (int treat_exec_as_sigtrap)
1991 {
1992 struct cleanup *old_cleanups;
1993 struct execution_control_state ecss;
1994 struct execution_control_state *ecs;
1995
1996 if (debug_infrun)
1997 fprintf_unfiltered
1998 (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
1999 treat_exec_as_sigtrap);
2000
2001 old_cleanups =
2002 make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2003
2004 ecs = &ecss;
2005 memset (ecs, 0, sizeof (*ecs));
2006
2007 /* We'll update this if & when we switch to a new thread. */
2008 previous_inferior_ptid = inferior_ptid;
2009
2010 while (1)
2011 {
2012 struct cleanup *old_chain;
2013
2014 /* We have to invalidate the registers BEFORE calling target_wait
2015 because they can be loaded from the target while in target_wait.
2016 This makes remote debugging a bit more efficient for those
2017 targets that provide critical registers as part of their normal
2018 status mechanism. */
2019
2020 overlay_cache_invalid = 1;
2021 registers_changed ();
2022
2023 if (deprecated_target_wait_hook)
2024 ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2025 else
2026 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2027
2028 if (debug_infrun)
2029 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2030
2031 if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2032 {
2033 xfree (ecs->ws.value.execd_pathname);
2034 ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2035 ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2036 }
2037
2038 /* If an error happens while handling the event, propagate GDB's
2039 knowledge of the executing state to the frontend/user running
2040 state. */
2041 old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2042
2043 /* Now figure out what to do with the result of the result. */
2044 handle_inferior_event (ecs);
2045
2046 /* No error, don't finish the state yet. */
2047 discard_cleanups (old_chain);
2048
2049 if (!ecs->wait_some_more)
2050 break;
2051 }
2052
2053 do_cleanups (old_cleanups);
2054 }
2055
2056 /* Asynchronous version of wait_for_inferior. It is called by the
2057 event loop whenever a change of state is detected on the file
2058 descriptor corresponding to the target. It can be called more than
2059 once to complete a single execution command. In such cases we need
2060 to keep the state in a global variable ECSS. If it is the last time
2061 that this function is called for a single execution command, then
2062 report to the user that the inferior has stopped, and do the
2063 necessary cleanups. */
2064
2065 void
2066 fetch_inferior_event (void *client_data)
2067 {
2068 struct execution_control_state ecss;
2069 struct execution_control_state *ecs = &ecss;
2070 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2071 struct cleanup *ts_old_chain;
2072 int was_sync = sync_execution;
2073
2074 memset (ecs, 0, sizeof (*ecs));
2075
2076 /* We'll update this if & when we switch to a new thread. */
2077 previous_inferior_ptid = inferior_ptid;
2078
2079 if (non_stop)
2080 /* In non-stop mode, the user/frontend should not notice a thread
2081 switch due to internal events. Make sure we reverse to the
2082 user selected thread and frame after handling the event and
2083 running any breakpoint commands. */
2084 make_cleanup_restore_current_thread ();
2085
2086 /* We have to invalidate the registers BEFORE calling target_wait
2087 because they can be loaded from the target while in target_wait.
2088 This makes remote debugging a bit more efficient for those
2089 targets that provide critical registers as part of their normal
2090 status mechanism. */
2091
2092 overlay_cache_invalid = 1;
2093 registers_changed ();
2094
2095 if (deprecated_target_wait_hook)
2096 ecs->ptid =
2097 deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2098 else
2099 ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2100
2101 if (debug_infrun)
2102 print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2103
2104 if (non_stop
2105 && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2106 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2107 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2108 /* In non-stop mode, each thread is handled individually. Switch
2109 early, so the global state is set correctly for this
2110 thread. */
2111 context_switch (ecs->ptid);
2112
2113 /* If an error happens while handling the event, propagate GDB's
2114 knowledge of the executing state to the frontend/user running
2115 state. */
2116 if (!non_stop)
2117 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2118 else
2119 ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2120
2121 /* Now figure out what to do with the result of the result. */
2122 handle_inferior_event (ecs);
2123
2124 if (!ecs->wait_some_more)
2125 {
2126 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2127
2128 delete_step_thread_step_resume_breakpoint ();
2129
2130 /* We may not find an inferior if this was a process exit. */
2131 if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2132 normal_stop ();
2133
2134 if (target_has_execution
2135 && ecs->ws.kind != TARGET_WAITKIND_EXITED
2136 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2137 && ecs->event_thread->step_multi
2138 && ecs->event_thread->stop_step)
2139 inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2140 else
2141 inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2142 }
2143
2144 /* No error, don't finish the thread states yet. */
2145 discard_cleanups (ts_old_chain);
2146
2147 /* Revert thread and frame. */
2148 do_cleanups (old_chain);
2149
2150 /* If the inferior was in sync execution mode, and now isn't,
2151 restore the prompt. */
2152 if (was_sync && !sync_execution)
2153 display_gdb_prompt (0);
2154 }
2155
2156 /* Record the frame and location we're currently stepping through. */
2157 void
2158 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2159 {
2160 struct thread_info *tp = inferior_thread ();
2161
2162 tp->step_frame_id = get_frame_id (frame);
2163 tp->step_stack_frame_id = get_stack_frame_id (frame);
2164
2165 tp->current_symtab = sal.symtab;
2166 tp->current_line = sal.line;
2167 }
2168
2169 /* Prepare an execution control state for looping through a
2170 wait_for_inferior-type loop. */
2171
2172 static void
2173 init_execution_control_state (struct execution_control_state *ecs)
2174 {
2175 ecs->random_signal = 0;
2176 }
2177
2178 /* Clear context switchable stepping state. */
2179
2180 void
2181 init_thread_stepping_state (struct thread_info *tss)
2182 {
2183 tss->stepping_over_breakpoint = 0;
2184 tss->step_after_step_resume_breakpoint = 0;
2185 tss->stepping_through_solib_after_catch = 0;
2186 tss->stepping_through_solib_catchpoints = NULL;
2187 }
2188
2189 /* Return the cached copy of the last pid/waitstatus returned by
2190 target_wait()/deprecated_target_wait_hook(). The data is actually
2191 cached by handle_inferior_event(), which gets called immediately
2192 after target_wait()/deprecated_target_wait_hook(). */
2193
2194 void
2195 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2196 {
2197 *ptidp = target_last_wait_ptid;
2198 *status = target_last_waitstatus;
2199 }
2200
2201 void
2202 nullify_last_target_wait_ptid (void)
2203 {
2204 target_last_wait_ptid = minus_one_ptid;
2205 }
2206
2207 /* Switch thread contexts. */
2208
2209 static void
2210 context_switch (ptid_t ptid)
2211 {
2212 if (debug_infrun)
2213 {
2214 fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2215 target_pid_to_str (inferior_ptid));
2216 fprintf_unfiltered (gdb_stdlog, "to %s\n",
2217 target_pid_to_str (ptid));
2218 }
2219
2220 switch_to_thread (ptid);
2221 }
2222
2223 static void
2224 adjust_pc_after_break (struct execution_control_state *ecs)
2225 {
2226 struct regcache *regcache;
2227 struct gdbarch *gdbarch;
2228 CORE_ADDR breakpoint_pc;
2229
2230 /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP. If
2231 we aren't, just return.
2232
2233 We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2234 affected by gdbarch_decr_pc_after_break. Other waitkinds which are
2235 implemented by software breakpoints should be handled through the normal
2236 breakpoint layer.
2237
2238 NOTE drow/2004-01-31: On some targets, breakpoints may generate
2239 different signals (SIGILL or SIGEMT for instance), but it is less
2240 clear where the PC is pointing afterwards. It may not match
2241 gdbarch_decr_pc_after_break. I don't know any specific target that
2242 generates these signals at breakpoints (the code has been in GDB since at
2243 least 1992) so I can not guess how to handle them here.
2244
2245 In earlier versions of GDB, a target with
2246 gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2247 watchpoint affected by gdbarch_decr_pc_after_break. I haven't found any
2248 target with both of these set in GDB history, and it seems unlikely to be
2249 correct, so gdbarch_have_nonsteppable_watchpoint is not checked here. */
2250
2251 if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2252 return;
2253
2254 if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2255 return;
2256
2257 /* In reverse execution, when a breakpoint is hit, the instruction
2258 under it has already been de-executed. The reported PC always
2259 points at the breakpoint address, so adjusting it further would
2260 be wrong. E.g., consider this case on a decr_pc_after_break == 1
2261 architecture:
2262
2263 B1 0x08000000 : INSN1
2264 B2 0x08000001 : INSN2
2265 0x08000002 : INSN3
2266 PC -> 0x08000003 : INSN4
2267
2268 Say you're stopped at 0x08000003 as above. Reverse continuing
2269 from that point should hit B2 as below. Reading the PC when the
2270 SIGTRAP is reported should read 0x08000001 and INSN2 should have
2271 been de-executed already.
2272
2273 B1 0x08000000 : INSN1
2274 B2 PC -> 0x08000001 : INSN2
2275 0x08000002 : INSN3
2276 0x08000003 : INSN4
2277
2278 We can't apply the same logic as for forward execution, because
2279 we would wrongly adjust the PC to 0x08000000, since there's a
2280 breakpoint at PC - 1. We'd then report a hit on B1, although
2281 INSN1 hadn't been de-executed yet. Doing nothing is the correct
2282 behaviour. */
2283 if (execution_direction == EXEC_REVERSE)
2284 return;
2285
2286 /* If this target does not decrement the PC after breakpoints, then
2287 we have nothing to do. */
2288 regcache = get_thread_regcache (ecs->ptid);
2289 gdbarch = get_regcache_arch (regcache);
2290 if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2291 return;
2292
2293 /* Find the location where (if we've hit a breakpoint) the
2294 breakpoint would be. */
2295 breakpoint_pc = regcache_read_pc (regcache)
2296 - gdbarch_decr_pc_after_break (gdbarch);
2297
2298 /* Check whether there actually is a software breakpoint inserted at
2299 that location.
2300
2301 If in non-stop mode, a race condition is possible where we've
2302 removed a breakpoint, but stop events for that breakpoint were
2303 already queued and arrive later. To suppress those spurious
2304 SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2305 and retire them after a number of stop events are reported. */
2306 if (software_breakpoint_inserted_here_p (breakpoint_pc)
2307 || (non_stop && moribund_breakpoint_here_p (breakpoint_pc)))
2308 {
2309 struct cleanup *old_cleanups = NULL;
2310 if (RECORD_IS_USED)
2311 old_cleanups = record_gdb_operation_disable_set ();
2312
2313 /* When using hardware single-step, a SIGTRAP is reported for both
2314 a completed single-step and a software breakpoint. Need to
2315 differentiate between the two, as the latter needs adjusting
2316 but the former does not.
2317
2318 The SIGTRAP can be due to a completed hardware single-step only if
2319 - we didn't insert software single-step breakpoints
2320 - the thread to be examined is still the current thread
2321 - this thread is currently being stepped
2322
2323 If any of these events did not occur, we must have stopped due
2324 to hitting a software breakpoint, and have to back up to the
2325 breakpoint address.
2326
2327 As a special case, we could have hardware single-stepped a
2328 software breakpoint. In this case (prev_pc == breakpoint_pc),
2329 we also need to back up to the breakpoint address. */
2330
2331 if (singlestep_breakpoints_inserted_p
2332 || !ptid_equal (ecs->ptid, inferior_ptid)
2333 || !currently_stepping (ecs->event_thread)
2334 || ecs->event_thread->prev_pc == breakpoint_pc)
2335 regcache_write_pc (regcache, breakpoint_pc);
2336
2337 if (RECORD_IS_USED)
2338 do_cleanups (old_cleanups);
2339 }
2340 }
2341
2342 void
2343 init_infwait_state (void)
2344 {
2345 waiton_ptid = pid_to_ptid (-1);
2346 infwait_state = infwait_normal_state;
2347 }
2348
2349 void
2350 error_is_running (void)
2351 {
2352 error (_("\
2353 Cannot execute this command while the selected thread is running."));
2354 }
2355
2356 void
2357 ensure_not_running (void)
2358 {
2359 if (is_running (inferior_ptid))
2360 error_is_running ();
2361 }
2362
2363 static int
2364 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2365 {
2366 for (frame = get_prev_frame (frame);
2367 frame != NULL;
2368 frame = get_prev_frame (frame))
2369 {
2370 if (frame_id_eq (get_frame_id (frame), step_frame_id))
2371 return 1;
2372 if (get_frame_type (frame) != INLINE_FRAME)
2373 break;
2374 }
2375
2376 return 0;
2377 }
2378
2379 /* Given an execution control state that has been freshly filled in
2380 by an event from the inferior, figure out what it means and take
2381 appropriate action. */
2382
2383 static void
2384 handle_inferior_event (struct execution_control_state *ecs)
2385 {
2386 struct frame_info *frame;
2387 struct gdbarch *gdbarch;
2388 int sw_single_step_trap_p = 0;
2389 int stopped_by_watchpoint;
2390 int stepped_after_stopped_by_watchpoint = 0;
2391 struct symtab_and_line stop_pc_sal;
2392 enum stop_kind stop_soon;
2393
2394 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2395 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2396 && ecs->ws.kind != TARGET_WAITKIND_IGNORE)
2397 {
2398 struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2399 gdb_assert (inf);
2400 stop_soon = inf->stop_soon;
2401 }
2402 else
2403 stop_soon = NO_STOP_QUIETLY;
2404
2405 /* Cache the last pid/waitstatus. */
2406 target_last_wait_ptid = ecs->ptid;
2407 target_last_waitstatus = ecs->ws;
2408
2409 /* Always clear state belonging to the previous time we stopped. */
2410 stop_stack_dummy = 0;
2411
2412 /* If it's a new process, add it to the thread database */
2413
2414 ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
2415 && !ptid_equal (ecs->ptid, minus_one_ptid)
2416 && !in_thread_list (ecs->ptid));
2417
2418 if (ecs->ws.kind != TARGET_WAITKIND_EXITED
2419 && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
2420 add_thread (ecs->ptid);
2421
2422 ecs->event_thread = find_thread_ptid (ecs->ptid);
2423
2424 /* Dependent on valid ECS->EVENT_THREAD. */
2425 adjust_pc_after_break (ecs);
2426
2427 /* Dependent on the current PC value modified by adjust_pc_after_break. */
2428 reinit_frame_cache ();
2429
2430 if (ecs->ws.kind != TARGET_WAITKIND_IGNORE)
2431 {
2432 breakpoint_retire_moribund ();
2433
2434 /* Mark the non-executing threads accordingly. In all-stop, all
2435 threads of all processes are stopped when we get any event
2436 reported. In non-stop mode, only the event thread stops. If
2437 we're handling a process exit in non-stop mode, there's
2438 nothing to do, as threads of the dead process are gone, and
2439 threads of any other process were left running. */
2440 if (!non_stop)
2441 set_executing (minus_one_ptid, 0);
2442 else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2443 && ecs->ws.kind != TARGET_WAITKIND_EXITED)
2444 set_executing (inferior_ptid, 0);
2445 }
2446
2447 switch (infwait_state)
2448 {
2449 case infwait_thread_hop_state:
2450 if (debug_infrun)
2451 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
2452 break;
2453
2454 case infwait_normal_state:
2455 if (debug_infrun)
2456 fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
2457 break;
2458
2459 case infwait_step_watch_state:
2460 if (debug_infrun)
2461 fprintf_unfiltered (gdb_stdlog,
2462 "infrun: infwait_step_watch_state\n");
2463
2464 stepped_after_stopped_by_watchpoint = 1;
2465 break;
2466
2467 case infwait_nonstep_watch_state:
2468 if (debug_infrun)
2469 fprintf_unfiltered (gdb_stdlog,
2470 "infrun: infwait_nonstep_watch_state\n");
2471 insert_breakpoints ();
2472
2473 /* FIXME-maybe: is this cleaner than setting a flag? Does it
2474 handle things like signals arriving and other things happening
2475 in combination correctly? */
2476 stepped_after_stopped_by_watchpoint = 1;
2477 break;
2478
2479 default:
2480 internal_error (__FILE__, __LINE__, _("bad switch"));
2481 }
2482
2483 infwait_state = infwait_normal_state;
2484 waiton_ptid = pid_to_ptid (-1);
2485
2486 switch (ecs->ws.kind)
2487 {
2488 case TARGET_WAITKIND_LOADED:
2489 if (debug_infrun)
2490 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
2491 /* Ignore gracefully during startup of the inferior, as it might
2492 be the shell which has just loaded some objects, otherwise
2493 add the symbols for the newly loaded objects. Also ignore at
2494 the beginning of an attach or remote session; we will query
2495 the full list of libraries once the connection is
2496 established. */
2497 if (stop_soon == NO_STOP_QUIETLY)
2498 {
2499 /* Check for any newly added shared libraries if we're
2500 supposed to be adding them automatically. Switch
2501 terminal for any messages produced by
2502 breakpoint_re_set. */
2503 target_terminal_ours_for_output ();
2504 /* NOTE: cagney/2003-11-25: Make certain that the target
2505 stack's section table is kept up-to-date. Architectures,
2506 (e.g., PPC64), use the section table to perform
2507 operations such as address => section name and hence
2508 require the table to contain all sections (including
2509 those found in shared libraries). */
2510 #ifdef SOLIB_ADD
2511 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
2512 #else
2513 solib_add (NULL, 0, &current_target, auto_solib_add);
2514 #endif
2515 target_terminal_inferior ();
2516
2517 /* If requested, stop when the dynamic linker notifies
2518 gdb of events. This allows the user to get control
2519 and place breakpoints in initializer routines for
2520 dynamically loaded objects (among other things). */
2521 if (stop_on_solib_events)
2522 {
2523 stop_stepping (ecs);
2524 return;
2525 }
2526
2527 /* NOTE drow/2007-05-11: This might be a good place to check
2528 for "catch load". */
2529 }
2530
2531 /* If we are skipping through a shell, or through shared library
2532 loading that we aren't interested in, resume the program. If
2533 we're running the program normally, also resume. But stop if
2534 we're attaching or setting up a remote connection. */
2535 if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
2536 {
2537 /* Loading of shared libraries might have changed breakpoint
2538 addresses. Make sure new breakpoints are inserted. */
2539 if (stop_soon == NO_STOP_QUIETLY
2540 && !breakpoints_always_inserted_mode ())
2541 insert_breakpoints ();
2542 resume (0, TARGET_SIGNAL_0);
2543 prepare_to_wait (ecs);
2544 return;
2545 }
2546
2547 break;
2548
2549 case TARGET_WAITKIND_SPURIOUS:
2550 if (debug_infrun)
2551 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
2552 resume (0, TARGET_SIGNAL_0);
2553 prepare_to_wait (ecs);
2554 return;
2555
2556 case TARGET_WAITKIND_EXITED:
2557 if (debug_infrun)
2558 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
2559 inferior_ptid = ecs->ptid;
2560 target_terminal_ours (); /* Must do this before mourn anyway */
2561 print_stop_reason (EXITED, ecs->ws.value.integer);
2562
2563 /* Record the exit code in the convenience variable $_exitcode, so
2564 that the user can inspect this again later. */
2565 set_internalvar_integer (lookup_internalvar ("_exitcode"),
2566 (LONGEST) ecs->ws.value.integer);
2567 gdb_flush (gdb_stdout);
2568 target_mourn_inferior ();
2569 singlestep_breakpoints_inserted_p = 0;
2570 stop_print_frame = 0;
2571 stop_stepping (ecs);
2572 return;
2573
2574 case TARGET_WAITKIND_SIGNALLED:
2575 if (debug_infrun)
2576 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
2577 inferior_ptid = ecs->ptid;
2578 stop_print_frame = 0;
2579 target_terminal_ours (); /* Must do this before mourn anyway */
2580
2581 /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
2582 reach here unless the inferior is dead. However, for years
2583 target_kill() was called here, which hints that fatal signals aren't
2584 really fatal on some systems. If that's true, then some changes
2585 may be needed. */
2586 target_mourn_inferior ();
2587
2588 print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
2589 singlestep_breakpoints_inserted_p = 0;
2590 stop_stepping (ecs);
2591 return;
2592
2593 /* The following are the only cases in which we keep going;
2594 the above cases end in a continue or goto. */
2595 case TARGET_WAITKIND_FORKED:
2596 case TARGET_WAITKIND_VFORKED:
2597 if (debug_infrun)
2598 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
2599
2600 if (!ptid_equal (ecs->ptid, inferior_ptid))
2601 {
2602 context_switch (ecs->ptid);
2603 reinit_frame_cache ();
2604 }
2605
2606 /* Immediately detach breakpoints from the child before there's
2607 any chance of letting the user delete breakpoints from the
2608 breakpoint lists. If we don't do this early, it's easy to
2609 leave left over traps in the child, vis: "break foo; catch
2610 fork; c; <fork>; del; c; <child calls foo>". We only follow
2611 the fork on the last `continue', and by that time the
2612 breakpoint at "foo" is long gone from the breakpoint table.
2613 If we vforked, then we don't need to unpatch here, since both
2614 parent and child are sharing the same memory pages; we'll
2615 need to unpatch at follow/detach time instead to be certain
2616 that new breakpoints added between catchpoint hit time and
2617 vfork follow are detached. */
2618 if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
2619 {
2620 int child_pid = ptid_get_pid (ecs->ws.value.related_pid);
2621
2622 /* This won't actually modify the breakpoint list, but will
2623 physically remove the breakpoints from the child. */
2624 detach_breakpoints (child_pid);
2625 }
2626
2627 /* In case the event is caught by a catchpoint, remember that
2628 the event is to be followed at the next resume of the thread,
2629 and not immediately. */
2630 ecs->event_thread->pending_follow = ecs->ws;
2631
2632 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2633
2634 ecs->event_thread->stop_bpstat = bpstat_stop_status (stop_pc, ecs->ptid);
2635
2636 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2637
2638 /* If no catchpoint triggered for this, then keep going. */
2639 if (ecs->random_signal)
2640 {
2641 int should_resume;
2642
2643 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2644
2645 should_resume = follow_fork ();
2646
2647 ecs->event_thread = inferior_thread ();
2648 ecs->ptid = inferior_ptid;
2649
2650 if (should_resume)
2651 keep_going (ecs);
2652 else
2653 stop_stepping (ecs);
2654 return;
2655 }
2656 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2657 goto process_event_stop_test;
2658
2659 case TARGET_WAITKIND_EXECD:
2660 if (debug_infrun)
2661 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");
2662
2663 if (!ptid_equal (ecs->ptid, inferior_ptid))
2664 {
2665 context_switch (ecs->ptid);
2666 reinit_frame_cache ();
2667 }
2668
2669 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2670
2671 /* This causes the eventpoints and symbol table to be reset.
2672 Must do this now, before trying to determine whether to
2673 stop. */
2674 follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);
2675
2676 ecs->event_thread->stop_bpstat = bpstat_stop_status (stop_pc, ecs->ptid);
2677 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
2678
2679 /* Note that this may be referenced from inside
2680 bpstat_stop_status above, through inferior_has_execd. */
2681 xfree (ecs->ws.value.execd_pathname);
2682 ecs->ws.value.execd_pathname = NULL;
2683
2684 /* If no catchpoint triggered for this, then keep going. */
2685 if (ecs->random_signal)
2686 {
2687 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2688 keep_going (ecs);
2689 return;
2690 }
2691 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
2692 goto process_event_stop_test;
2693
2694 /* Be careful not to try to gather much state about a thread
2695 that's in a syscall. It's frequently a losing proposition. */
2696 case TARGET_WAITKIND_SYSCALL_ENTRY:
2697 if (debug_infrun)
2698 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
2699 resume (0, TARGET_SIGNAL_0);
2700 prepare_to_wait (ecs);
2701 return;
2702
2703 /* Before examining the threads further, step this thread to
2704 get it entirely out of the syscall. (We get notice of the
2705 event when the thread is just on the verge of exiting a
2706 syscall. Stepping one instruction seems to get it back
2707 into user code.) */
2708 case TARGET_WAITKIND_SYSCALL_RETURN:
2709 if (debug_infrun)
2710 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
2711 target_resume (ecs->ptid, 1, TARGET_SIGNAL_0);
2712 prepare_to_wait (ecs);
2713 return;
2714
2715 case TARGET_WAITKIND_STOPPED:
2716 if (debug_infrun)
2717 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
2718 ecs->event_thread->stop_signal = ecs->ws.value.sig;
2719 break;
2720
2721 case TARGET_WAITKIND_NO_HISTORY:
2722 /* Reverse execution: target ran out of history info. */
2723 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2724 print_stop_reason (NO_HISTORY, 0);
2725 stop_stepping (ecs);
2726 return;
2727
2728 /* We had an event in the inferior, but we are not interested
2729 in handling it at this level. The lower layers have already
2730 done what needs to be done, if anything.
2731
2732 One of the possible circumstances for this is when the
2733 inferior produces output for the console. The inferior has
2734 not stopped, and we are ignoring the event. Another possible
2735 circumstance is any event which the lower level knows will be
2736 reported multiple times without an intervening resume. */
2737 case TARGET_WAITKIND_IGNORE:
2738 if (debug_infrun)
2739 fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
2740 prepare_to_wait (ecs);
2741 return;
2742 }
2743
2744 if (ecs->new_thread_event)
2745 {
2746 if (non_stop)
2747 /* Non-stop assumes that the target handles adding new threads
2748 to the thread list. */
2749 internal_error (__FILE__, __LINE__, "\
2750 targets should add new threads to the thread list themselves in non-stop mode.");
2751
2752 /* We may want to consider not doing a resume here in order to
2753 give the user a chance to play with the new thread. It might
2754 be good to make that a user-settable option. */
2755
2756 /* At this point, all threads are stopped (happens automatically
2757 in either the OS or the native code). Therefore we need to
2758 continue all threads in order to make progress. */
2759
2760 if (!ptid_equal (ecs->ptid, inferior_ptid))
2761 context_switch (ecs->ptid);
2762 target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
2763 prepare_to_wait (ecs);
2764 return;
2765 }
2766
2767 if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
2768 {
2769 /* Do we need to clean up the state of a thread that has
2770 completed a displaced single-step? (Doing so usually affects
2771 the PC, so do it here, before we set stop_pc.) */
2772 displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);
2773
2774 /* If we either finished a single-step or hit a breakpoint, but
2775 the user wanted this thread to be stopped, pretend we got a
2776 SIG0 (generic unsignaled stop). */
2777
2778 if (ecs->event_thread->stop_requested
2779 && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
2780 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2781 }
2782
2783 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
2784
2785 if (debug_infrun)
2786 {
2787 struct regcache *regcache = get_thread_regcache (ecs->ptid);
2788 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2789
2790 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
2791 paddress (gdbarch, stop_pc));
2792 if (target_stopped_by_watchpoint ())
2793 {
2794 CORE_ADDR addr;
2795 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
2796
2797 if (target_stopped_data_address (&current_target, &addr))
2798 fprintf_unfiltered (gdb_stdlog,
2799 "infrun: stopped data address = %s\n",
2800 paddress (gdbarch, addr));
2801 else
2802 fprintf_unfiltered (gdb_stdlog,
2803 "infrun: (no data address available)\n");
2804 }
2805 }
2806
2807 if (stepping_past_singlestep_breakpoint)
2808 {
2809 gdb_assert (singlestep_breakpoints_inserted_p);
2810 gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
2811 gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));
2812
2813 stepping_past_singlestep_breakpoint = 0;
2814
2815 /* We've either finished single-stepping past the single-step
2816 breakpoint, or stopped for some other reason. It would be nice if
2817 we could tell, but we can't reliably. */
2818 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
2819 {
2820 if (debug_infrun)
2821 fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
2822 /* Pull the single step breakpoints out of the target. */
2823 remove_single_step_breakpoints ();
2824 singlestep_breakpoints_inserted_p = 0;
2825
2826 ecs->random_signal = 0;
2827 ecs->event_thread->trap_expected = 0;
2828
2829 context_switch (saved_singlestep_ptid);
2830 if (deprecated_context_hook)
2831 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
2832
2833 resume (1, TARGET_SIGNAL_0);
2834 prepare_to_wait (ecs);
2835 return;
2836 }
2837 }
2838
2839 if (!ptid_equal (deferred_step_ptid, null_ptid))
2840 {
2841 /* In non-stop mode, there's never a deferred_step_ptid set. */
2842 gdb_assert (!non_stop);
2843
2844 /* If we stopped for some other reason than single-stepping, ignore
2845 the fact that we were supposed to switch back. */
2846 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
2847 {
2848 if (debug_infrun)
2849 fprintf_unfiltered (gdb_stdlog,
2850 "infrun: handling deferred step\n");
2851
2852 /* Pull the single step breakpoints out of the target. */
2853 if (singlestep_breakpoints_inserted_p)
2854 {
2855 remove_single_step_breakpoints ();
2856 singlestep_breakpoints_inserted_p = 0;
2857 }
2858
2859 /* Note: We do not call context_switch at this point, as the
2860 context is already set up for stepping the original thread. */
2861 switch_to_thread (deferred_step_ptid);
2862 deferred_step_ptid = null_ptid;
2863 /* Suppress spurious "Switching to ..." message. */
2864 previous_inferior_ptid = inferior_ptid;
2865
2866 resume (1, TARGET_SIGNAL_0);
2867 prepare_to_wait (ecs);
2868 return;
2869 }
2870
2871 deferred_step_ptid = null_ptid;
2872 }
2873
2874 /* See if a thread hit a thread-specific breakpoint that was meant for
2875 another thread. If so, then step that thread past the breakpoint,
2876 and continue it. */
2877
2878 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
2879 {
2880 int thread_hop_needed = 0;
2881
2882 /* Check if a regular breakpoint has been hit before checking
2883 for a potential single step breakpoint. Otherwise, GDB will
2884 not see this breakpoint hit when stepping onto breakpoints. */
2885 if (regular_breakpoint_inserted_here_p (stop_pc))
2886 {
2887 ecs->random_signal = 0;
2888 if (!breakpoint_thread_match (stop_pc, ecs->ptid))
2889 thread_hop_needed = 1;
2890 }
2891 else if (singlestep_breakpoints_inserted_p)
2892 {
2893 /* We have not context switched yet, so this should be true
2894 no matter which thread hit the singlestep breakpoint. */
2895 gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
2896 if (debug_infrun)
2897 fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
2898 "trap for %s\n",
2899 target_pid_to_str (ecs->ptid));
2900
2901 ecs->random_signal = 0;
2902 /* The call to in_thread_list is necessary because PTIDs sometimes
2903 change when we go from single-threaded to multi-threaded. If
2904 the singlestep_ptid is still in the list, assume that it is
2905 really different from ecs->ptid. */
2906 if (!ptid_equal (singlestep_ptid, ecs->ptid)
2907 && in_thread_list (singlestep_ptid))
2908 {
2909 /* If the PC of the thread we were trying to single-step
2910 has changed, discard this event (which we were going
2911 to ignore anyway), and pretend we saw that thread
2912 trap. This prevents us continuously moving the
2913 single-step breakpoint forward, one instruction at a
2914 time. If the PC has changed, then the thread we were
2915 trying to single-step has trapped or been signalled,
2916 but the event has not been reported to GDB yet.
2917
2918 There might be some cases where this loses signal
2919 information, if a signal has arrived at exactly the
2920 same time that the PC changed, but this is the best
2921 we can do with the information available. Perhaps we
2922 should arrange to report all events for all threads
2923 when they stop, or to re-poll the remote looking for
2924 this particular thread (i.e. temporarily enable
2925 schedlock). */
2926
2927 CORE_ADDR new_singlestep_pc
2928 = regcache_read_pc (get_thread_regcache (singlestep_ptid));
2929
2930 if (new_singlestep_pc != singlestep_pc)
2931 {
2932 enum target_signal stop_signal;
2933
2934 if (debug_infrun)
2935 fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
2936 " but expected thread advanced also\n");
2937
2938 /* The current context still belongs to
2939 singlestep_ptid. Don't swap here, since that's
2940 the context we want to use. Just fudge our
2941 state and continue. */
2942 stop_signal = ecs->event_thread->stop_signal;
2943 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
2944 ecs->ptid = singlestep_ptid;
2945 ecs->event_thread = find_thread_ptid (ecs->ptid);
2946 ecs->event_thread->stop_signal = stop_signal;
2947 stop_pc = new_singlestep_pc;
2948 }
2949 else
2950 {
2951 if (debug_infrun)
2952 fprintf_unfiltered (gdb_stdlog,
2953 "infrun: unexpected thread\n");
2954
2955 thread_hop_needed = 1;
2956 stepping_past_singlestep_breakpoint = 1;
2957 saved_singlestep_ptid = singlestep_ptid;
2958 }
2959 }
2960 }
2961
2962 if (thread_hop_needed)
2963 {
2964 struct regcache *thread_regcache;
2965 int remove_status = 0;
2966
2967 if (debug_infrun)
2968 fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");
2969
2970 /* Switch context before touching inferior memory, the
2971 previous thread may have exited. */
2972 if (!ptid_equal (inferior_ptid, ecs->ptid))
2973 context_switch (ecs->ptid);
2974
2975 /* Saw a breakpoint, but it was hit by the wrong thread.
2976 Just continue. */
2977
2978 if (singlestep_breakpoints_inserted_p)
2979 {
2980 /* Pull the single step breakpoints out of the target. */
2981 remove_single_step_breakpoints ();
2982 singlestep_breakpoints_inserted_p = 0;
2983 }
2984
2985 /* If the arch can displace step, don't remove the
2986 breakpoints. */
2987 thread_regcache = get_thread_regcache (ecs->ptid);
2988 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
2989 remove_status = remove_breakpoints ();
2990
2991 /* Did we fail to remove breakpoints? If so, try
2992 to set the PC past the bp. (There's at least
2993 one situation in which we can fail to remove
2994 the bp's: On HP-UX's that use ttrace, we can't
2995 change the address space of a vforking child
2996 process until the child exits (well, okay, not
2997 then either :-) or execs. */
2998 if (remove_status != 0)
2999 error (_("Cannot step over breakpoint hit in wrong thread"));
3000 else
3001 { /* Single step */
3002 if (!non_stop)
3003 {
3004 /* Only need to require the next event from this
3005 thread in all-stop mode. */
3006 waiton_ptid = ecs->ptid;
3007 infwait_state = infwait_thread_hop_state;
3008 }
3009
3010 ecs->event_thread->stepping_over_breakpoint = 1;
3011 keep_going (ecs);
3012 return;
3013 }
3014 }
3015 else if (singlestep_breakpoints_inserted_p)
3016 {
3017 sw_single_step_trap_p = 1;
3018 ecs->random_signal = 0;
3019 }
3020 }
3021 else
3022 ecs->random_signal = 1;
3023
3024 /* See if something interesting happened to the non-current thread. If
3025 so, then switch to that thread. */
3026 if (!ptid_equal (ecs->ptid, inferior_ptid))
3027 {
3028 if (debug_infrun)
3029 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
3030
3031 context_switch (ecs->ptid);
3032
3033 if (deprecated_context_hook)
3034 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
3035 }
3036
3037 /* At this point, get hold of the now-current thread's frame. */
3038 frame = get_current_frame ();
3039 gdbarch = get_frame_arch (frame);
3040
3041 if (singlestep_breakpoints_inserted_p)
3042 {
3043 /* Pull the single step breakpoints out of the target. */
3044 remove_single_step_breakpoints ();
3045 singlestep_breakpoints_inserted_p = 0;
3046 }
3047
3048 if (stepped_after_stopped_by_watchpoint)
3049 stopped_by_watchpoint = 0;
3050 else
3051 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
3052
3053 /* If necessary, step over this watchpoint. We'll be back to display
3054 it in a moment. */
3055 if (stopped_by_watchpoint
3056 && (target_have_steppable_watchpoint
3057 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
3058 {
3059 /* At this point, we are stopped at an instruction which has
3060 attempted to write to a piece of memory under control of
3061 a watchpoint. The instruction hasn't actually executed
3062 yet. If we were to evaluate the watchpoint expression
3063 now, we would get the old value, and therefore no change
3064 would seem to have occurred.
3065
3066 In order to make watchpoints work `right', we really need
3067 to complete the memory write, and then evaluate the
3068 watchpoint expression. We do this by single-stepping the
3069 target.
3070
3071 It may not be necessary to disable the watchpoint to stop over
3072 it. For example, the PA can (with some kernel cooperation)
3073 single step over a watchpoint without disabling the watchpoint.
3074
3075 It is far more common to need to disable a watchpoint to step
3076 the inferior over it. If we have non-steppable watchpoints,
3077 we must disable the current watchpoint; it's simplest to
3078 disable all watchpoints and breakpoints. */
3079 int hw_step = 1;
3080
3081 if (!target_have_steppable_watchpoint)
3082 remove_breakpoints ();
3083 /* Single step */
3084 hw_step = maybe_software_singlestep (gdbarch, stop_pc);
3085 target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
3086 waiton_ptid = ecs->ptid;
3087 if (target_have_steppable_watchpoint)
3088 infwait_state = infwait_step_watch_state;
3089 else
3090 infwait_state = infwait_nonstep_watch_state;
3091 prepare_to_wait (ecs);
3092 return;
3093 }
3094
3095 ecs->stop_func_start = 0;
3096 ecs->stop_func_end = 0;
3097 ecs->stop_func_name = 0;
3098 /* Don't care about return value; stop_func_start and stop_func_name
3099 will both be 0 if it doesn't work. */
3100 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3101 &ecs->stop_func_start, &ecs->stop_func_end);
3102 ecs->stop_func_start
3103 += gdbarch_deprecated_function_start_offset (gdbarch);
3104 ecs->event_thread->stepping_over_breakpoint = 0;
3105 bpstat_clear (&ecs->event_thread->stop_bpstat);
3106 ecs->event_thread->stop_step = 0;
3107 stop_print_frame = 1;
3108 ecs->random_signal = 0;
3109 stopped_by_random_signal = 0;
3110
3111 /* Hide inlined functions starting here, unless we just performed stepi or
3112 nexti. After stepi and nexti, always show the innermost frame (not any
3113 inline function call sites). */
3114 if (ecs->event_thread->step_range_end != 1)
3115 skip_inline_frames (ecs->ptid);
3116
3117 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3118 && ecs->event_thread->trap_expected
3119 && gdbarch_single_step_through_delay_p (gdbarch)
3120 && currently_stepping (ecs->event_thread))
3121 {
3122 /* We're trying to step off a breakpoint. Turns out that we're
3123 also on an instruction that needs to be stepped multiple
3124 times before it's been fully executing. E.g., architectures
3125 with a delay slot. It needs to be stepped twice, once for
3126 the instruction and once for the delay slot. */
3127 int step_through_delay
3128 = gdbarch_single_step_through_delay (gdbarch, frame);
3129 if (debug_infrun && step_through_delay)
3130 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
3131 if (ecs->event_thread->step_range_end == 0 && step_through_delay)
3132 {
3133 /* The user issued a continue when stopped at a breakpoint.
3134 Set up for another trap and get out of here. */
3135 ecs->event_thread->stepping_over_breakpoint = 1;
3136 keep_going (ecs);
3137 return;
3138 }
3139 else if (step_through_delay)
3140 {
3141 /* The user issued a step when stopped at a breakpoint.
3142 Maybe we should stop, maybe we should not - the delay
3143 slot *might* correspond to a line of source. In any
3144 case, don't decide that here, just set
3145 ecs->stepping_over_breakpoint, making sure we
3146 single-step again before breakpoints are re-inserted. */
3147 ecs->event_thread->stepping_over_breakpoint = 1;
3148 }
3149 }
3150
3151 /* Look at the cause of the stop, and decide what to do.
3152 The alternatives are:
3153 1) stop_stepping and return; to really stop and return to the debugger,
3154 2) keep_going and return to start up again
3155 (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
3156 3) set ecs->random_signal to 1, and the decision between 1 and 2
3157 will be made according to the signal handling tables. */
3158
3159 /* First, distinguish signals caused by the debugger from signals
3160 that have to do with the program's own actions. Note that
3161 breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
3162 on the operating system version. Here we detect when a SIGILL or
3163 SIGEMT is really a breakpoint and change it to SIGTRAP. We do
3164 something similar for SIGSEGV, since a SIGSEGV will be generated
3165 when we're trying to execute a breakpoint instruction on a
3166 non-executable stack. This happens for call dummy breakpoints
3167 for architectures like SPARC that place call dummies on the
3168 stack.
3169
3170 If we're doing a displaced step past a breakpoint, then the
3171 breakpoint is always inserted at the original instruction;
3172 non-standard signals can't be explained by the breakpoint. */
3173 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3174 || (! ecs->event_thread->trap_expected
3175 && breakpoint_inserted_here_p (stop_pc)
3176 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_ILL
3177 || ecs->event_thread->stop_signal == TARGET_SIGNAL_SEGV
3178 || ecs->event_thread->stop_signal == TARGET_SIGNAL_EMT))
3179 || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
3180 || stop_soon == STOP_QUIETLY_REMOTE)
3181 {
3182 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
3183 {
3184 if (debug_infrun)
3185 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
3186 stop_print_frame = 0;
3187 stop_stepping (ecs);
3188 return;
3189 }
3190
3191 /* This is originated from start_remote(), start_inferior() and
3192 shared libraries hook functions. */
3193 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
3194 {
3195 if (debug_infrun)
3196 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
3197 stop_stepping (ecs);
3198 return;
3199 }
3200
3201 /* This originates from attach_command(). We need to overwrite
3202 the stop_signal here, because some kernels don't ignore a
3203 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
3204 See more comments in inferior.h. On the other hand, if we
3205 get a non-SIGSTOP, report it to the user - assume the backend
3206 will handle the SIGSTOP if it should show up later.
3207
3208 Also consider that the attach is complete when we see a
3209 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
3210 target extended-remote report it instead of a SIGSTOP
3211 (e.g. gdbserver). We already rely on SIGTRAP being our
3212 signal, so this is no exception.
3213
3214 Also consider that the attach is complete when we see a
3215 TARGET_SIGNAL_0. In non-stop mode, GDB will explicitly tell
3216 the target to stop all threads of the inferior, in case the
3217 low level attach operation doesn't stop them implicitly. If
3218 they weren't stopped implicitly, then the stub will report a
3219 TARGET_SIGNAL_0, meaning: stopped for no particular reason
3220 other than GDB's request. */
3221 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
3222 && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
3223 || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
3224 || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
3225 {
3226 stop_stepping (ecs);
3227 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3228 return;
3229 }
3230
3231 /* See if there is a breakpoint at the current PC. */
3232 ecs->event_thread->stop_bpstat = bpstat_stop_status (stop_pc, ecs->ptid);
3233
3234 /* Following in case break condition called a
3235 function. */
3236 stop_print_frame = 1;
3237
3238 /* NOTE: cagney/2003-03-29: These two checks for a random signal
3239 at one stage in the past included checks for an inferior
3240 function call's call dummy's return breakpoint. The original
3241 comment, that went with the test, read:
3242
3243 ``End of a stack dummy. Some systems (e.g. Sony news) give
3244 another signal besides SIGTRAP, so check here as well as
3245 above.''
3246
3247 If someone ever tries to get call dummys on a
3248 non-executable stack to work (where the target would stop
3249 with something like a SIGSEGV), then those tests might need
3250 to be re-instated. Given, however, that the tests were only
3251 enabled when momentary breakpoints were not being used, I
3252 suspect that it won't be the case.
3253
3254 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
3255 be necessary for call dummies on a non-executable stack on
3256 SPARC. */
3257
3258 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
3259 ecs->random_signal
3260 = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
3261 || ecs->event_thread->trap_expected
3262 || (ecs->event_thread->step_range_end
3263 && ecs->event_thread->step_resume_breakpoint == NULL));
3264 else
3265 {
3266 ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
3267 if (!ecs->random_signal)
3268 ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
3269 }
3270 }
3271
3272 /* When we reach this point, we've pretty much decided
3273 that the reason for stopping must've been a random
3274 (unexpected) signal. */
3275
3276 else
3277 ecs->random_signal = 1;
3278
3279 process_event_stop_test:
3280
3281 /* Re-fetch current thread's frame in case we did a
3282 "goto process_event_stop_test" above. */
3283 frame = get_current_frame ();
3284 gdbarch = get_frame_arch (frame);
3285
3286 /* For the program's own signals, act according to
3287 the signal handling tables. */
3288
3289 if (ecs->random_signal)
3290 {
3291 /* Signal not for debugging purposes. */
3292 int printed = 0;
3293
3294 if (debug_infrun)
3295 fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
3296 ecs->event_thread->stop_signal);
3297
3298 stopped_by_random_signal = 1;
3299
3300 if (signal_print[ecs->event_thread->stop_signal])
3301 {
3302 printed = 1;
3303 target_terminal_ours_for_output ();
3304 print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
3305 }
3306 /* Always stop on signals if we're either just gaining control
3307 of the program, or the user explicitly requested this thread
3308 to remain stopped. */
3309 if (stop_soon != NO_STOP_QUIETLY
3310 || ecs->event_thread->stop_requested
3311 || signal_stop_state (ecs->event_thread->stop_signal))
3312 {
3313 stop_stepping (ecs);
3314 return;
3315 }
3316 /* If not going to stop, give terminal back
3317 if we took it away. */
3318 else if (printed)
3319 target_terminal_inferior ();
3320
3321 /* Clear the signal if it should not be passed. */
3322 if (signal_program[ecs->event_thread->stop_signal] == 0)
3323 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
3324
3325 if (ecs->event_thread->prev_pc == stop_pc
3326 && ecs->event_thread->trap_expected
3327 && ecs->event_thread->step_resume_breakpoint == NULL)
3328 {
3329 /* We were just starting a new sequence, attempting to
3330 single-step off of a breakpoint and expecting a SIGTRAP.
3331 Instead this signal arrives. This signal will take us out
3332 of the stepping range so GDB needs to remember to, when
3333 the signal handler returns, resume stepping off that
3334 breakpoint. */
3335 /* To simplify things, "continue" is forced to use the same
3336 code paths as single-step - set a breakpoint at the
3337 signal return address and then, once hit, step off that
3338 breakpoint. */
3339 if (debug_infrun)
3340 fprintf_unfiltered (gdb_stdlog,
3341 "infrun: signal arrived while stepping over "
3342 "breakpoint\n");
3343
3344 insert_step_resume_breakpoint_at_frame (frame);
3345 ecs->event_thread->step_after_step_resume_breakpoint = 1;
3346 keep_going (ecs);
3347 return;
3348 }
3349
3350 if (ecs->event_thread->step_range_end != 0
3351 && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
3352 && (ecs->event_thread->step_range_start <= stop_pc
3353 && stop_pc < ecs->event_thread->step_range_end)
3354 && frame_id_eq (get_stack_frame_id (frame),
3355 ecs->event_thread->step_stack_frame_id)
3356 && ecs->event_thread->step_resume_breakpoint == NULL)
3357 {
3358 /* The inferior is about to take a signal that will take it
3359 out of the single step range. Set a breakpoint at the
3360 current PC (which is presumably where the signal handler
3361 will eventually return) and then allow the inferior to
3362 run free.
3363
3364 Note that this is only needed for a signal delivered
3365 while in the single-step range. Nested signals aren't a
3366 problem as they eventually all return. */
3367 if (debug_infrun)
3368 fprintf_unfiltered (gdb_stdlog,
3369 "infrun: signal may take us out of "
3370 "single-step range\n");
3371
3372 insert_step_resume_breakpoint_at_frame (frame);
3373 keep_going (ecs);
3374 return;
3375 }
3376
3377 /* Note: step_resume_breakpoint may be non-NULL. This occures
3378 when either there's a nested signal, or when there's a
3379 pending signal enabled just as the signal handler returns
3380 (leaving the inferior at the step-resume-breakpoint without
3381 actually executing it). Either way continue until the
3382 breakpoint is really hit. */
3383 keep_going (ecs);
3384 return;
3385 }
3386
3387 /* Handle cases caused by hitting a breakpoint. */
3388 {
3389 CORE_ADDR jmp_buf_pc;
3390 struct bpstat_what what;
3391
3392 what = bpstat_what (ecs->event_thread->stop_bpstat);
3393
3394 if (what.call_dummy)
3395 {
3396 stop_stack_dummy = 1;
3397 }
3398
3399 switch (what.main_action)
3400 {
3401 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
3402 /* If we hit the breakpoint at longjmp while stepping, we
3403 install a momentary breakpoint at the target of the
3404 jmp_buf. */
3405
3406 if (debug_infrun)
3407 fprintf_unfiltered (gdb_stdlog,
3408 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
3409
3410 ecs->event_thread->stepping_over_breakpoint = 1;
3411
3412 if (!gdbarch_get_longjmp_target_p (gdbarch)
3413 || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
3414 {
3415 if (debug_infrun)
3416 fprintf_unfiltered (gdb_stdlog, "\
3417 infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
3418 keep_going (ecs);
3419 return;
3420 }
3421
3422 /* We're going to replace the current step-resume breakpoint
3423 with a longjmp-resume breakpoint. */
3424 delete_step_resume_breakpoint (ecs->event_thread);
3425
3426 /* Insert a breakpoint at resume address. */
3427 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
3428
3429 keep_going (ecs);
3430 return;
3431
3432 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
3433 if (debug_infrun)
3434 fprintf_unfiltered (gdb_stdlog,
3435 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
3436
3437 gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
3438 delete_step_resume_breakpoint (ecs->event_thread);
3439
3440 ecs->event_thread->stop_step = 1;
3441 print_stop_reason (END_STEPPING_RANGE, 0);
3442 stop_stepping (ecs);
3443 return;
3444
3445 case BPSTAT_WHAT_SINGLE:
3446 if (debug_infrun)
3447 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
3448 ecs->event_thread->stepping_over_breakpoint = 1;
3449 /* Still need to check other stuff, at least the case
3450 where we are stepping and step out of the right range. */
3451 break;
3452
3453 case BPSTAT_WHAT_STOP_NOISY:
3454 if (debug_infrun)
3455 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
3456 stop_print_frame = 1;
3457
3458 /* We are about to nuke the step_resume_breakpointt via the
3459 cleanup chain, so no need to worry about it here. */
3460
3461 stop_stepping (ecs);
3462 return;
3463
3464 case BPSTAT_WHAT_STOP_SILENT:
3465 if (debug_infrun)
3466 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
3467 stop_print_frame = 0;
3468
3469 /* We are about to nuke the step_resume_breakpoin via the
3470 cleanup chain, so no need to worry about it here. */
3471
3472 stop_stepping (ecs);
3473 return;
3474
3475 case BPSTAT_WHAT_STEP_RESUME:
3476 if (debug_infrun)
3477 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
3478
3479 delete_step_resume_breakpoint (ecs->event_thread);
3480 if (ecs->event_thread->step_after_step_resume_breakpoint)
3481 {
3482 /* Back when the step-resume breakpoint was inserted, we
3483 were trying to single-step off a breakpoint. Go back
3484 to doing that. */
3485 ecs->event_thread->step_after_step_resume_breakpoint = 0;
3486 ecs->event_thread->stepping_over_breakpoint = 1;
3487 keep_going (ecs);
3488 return;
3489 }
3490 if (stop_pc == ecs->stop_func_start
3491 && execution_direction == EXEC_REVERSE)
3492 {
3493 /* We are stepping over a function call in reverse, and
3494 just hit the step-resume breakpoint at the start
3495 address of the function. Go back to single-stepping,
3496 which should take us back to the function call. */
3497 ecs->event_thread->stepping_over_breakpoint = 1;
3498 keep_going (ecs);
3499 return;
3500 }
3501 break;
3502
3503 case BPSTAT_WHAT_CHECK_SHLIBS:
3504 {
3505 if (debug_infrun)
3506 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_CHECK_SHLIBS\n");
3507
3508 /* Check for any newly added shared libraries if we're
3509 supposed to be adding them automatically. Switch
3510 terminal for any messages produced by
3511 breakpoint_re_set. */
3512 target_terminal_ours_for_output ();
3513 /* NOTE: cagney/2003-11-25: Make certain that the target
3514 stack's section table is kept up-to-date. Architectures,
3515 (e.g., PPC64), use the section table to perform
3516 operations such as address => section name and hence
3517 require the table to contain all sections (including
3518 those found in shared libraries). */
3519 #ifdef SOLIB_ADD
3520 SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
3521 #else
3522 solib_add (NULL, 0, &current_target, auto_solib_add);
3523 #endif
3524 target_terminal_inferior ();
3525
3526 /* If requested, stop when the dynamic linker notifies
3527 gdb of events. This allows the user to get control
3528 and place breakpoints in initializer routines for
3529 dynamically loaded objects (among other things). */
3530 if (stop_on_solib_events || stop_stack_dummy)
3531 {
3532 stop_stepping (ecs);
3533 return;
3534 }
3535 else
3536 {
3537 /* We want to step over this breakpoint, then keep going. */
3538 ecs->event_thread->stepping_over_breakpoint = 1;
3539 break;
3540 }
3541 }
3542 break;
3543
3544 case BPSTAT_WHAT_LAST:
3545 /* Not a real code, but listed here to shut up gcc -Wall. */
3546
3547 case BPSTAT_WHAT_KEEP_CHECKING:
3548 break;
3549 }
3550 }
3551
3552 /* We come here if we hit a breakpoint but should not
3553 stop for it. Possibly we also were stepping
3554 and should stop for that. So fall through and
3555 test for stepping. But, if not stepping,
3556 do not stop. */
3557
3558 /* In all-stop mode, if we're currently stepping but have stopped in
3559 some other thread, we need to switch back to the stepped thread. */
3560 if (!non_stop)
3561 {
3562 struct thread_info *tp;
3563 tp = iterate_over_threads (currently_stepping_or_nexting_callback,
3564 ecs->event_thread);
3565 if (tp)
3566 {
3567 /* However, if the current thread is blocked on some internal
3568 breakpoint, and we simply need to step over that breakpoint
3569 to get it going again, do that first. */
3570 if ((ecs->event_thread->trap_expected
3571 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
3572 || ecs->event_thread->stepping_over_breakpoint)
3573 {
3574 keep_going (ecs);
3575 return;
3576 }
3577
3578 /* If the stepping thread exited, then don't try to switch
3579 back and resume it, which could fail in several different
3580 ways depending on the target. Instead, just keep going.
3581
3582 We can find a stepping dead thread in the thread list in
3583 two cases:
3584
3585 - The target supports thread exit events, and when the
3586 target tries to delete the thread from the thread list,
3587 inferior_ptid pointed at the exiting thread. In such
3588 case, calling delete_thread does not really remove the
3589 thread from the list; instead, the thread is left listed,
3590 with 'exited' state.
3591
3592 - The target's debug interface does not support thread
3593 exit events, and so we have no idea whatsoever if the
3594 previously stepping thread is still alive. For that
3595 reason, we need to synchronously query the target
3596 now. */
3597 if (is_exited (tp->ptid)
3598 || !target_thread_alive (tp->ptid))
3599 {
3600 if (debug_infrun)
3601 fprintf_unfiltered (gdb_stdlog, "\
3602 infrun: not switching back to stepped thread, it has vanished\n");
3603
3604 delete_thread (tp->ptid);
3605 keep_going (ecs);
3606 return;
3607 }
3608
3609 /* Otherwise, we no longer expect a trap in the current thread.
3610 Clear the trap_expected flag before switching back -- this is
3611 what keep_going would do as well, if we called it. */
3612 ecs->event_thread->trap_expected = 0;
3613
3614 if (debug_infrun)
3615 fprintf_unfiltered (gdb_stdlog,
3616 "infrun: switching back to stepped thread\n");
3617
3618 ecs->event_thread = tp;
3619 ecs->ptid = tp->ptid;
3620 context_switch (ecs->ptid);
3621 keep_going (ecs);
3622 return;
3623 }
3624 }
3625
3626 /* Are we stepping to get the inferior out of the dynamic linker's
3627 hook (and possibly the dld itself) after catching a shlib
3628 event? */
3629 if (ecs->event_thread->stepping_through_solib_after_catch)
3630 {
3631 #if defined(SOLIB_ADD)
3632 /* Have we reached our destination? If not, keep going. */
3633 if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
3634 {
3635 if (debug_infrun)
3636 fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
3637 ecs->event_thread->stepping_over_breakpoint = 1;
3638 keep_going (ecs);
3639 return;
3640 }
3641 #endif
3642 if (debug_infrun)
3643 fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
3644 /* Else, stop and report the catchpoint(s) whose triggering
3645 caused us to begin stepping. */
3646 ecs->event_thread->stepping_through_solib_after_catch = 0;
3647 bpstat_clear (&ecs->event_thread->stop_bpstat);
3648 ecs->event_thread->stop_bpstat
3649 = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
3650 bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
3651 stop_print_frame = 1;
3652 stop_stepping (ecs);
3653 return;
3654 }
3655
3656 if (ecs->event_thread->step_resume_breakpoint)
3657 {
3658 if (debug_infrun)
3659 fprintf_unfiltered (gdb_stdlog,
3660 "infrun: step-resume breakpoint is inserted\n");
3661
3662 /* Having a step-resume breakpoint overrides anything
3663 else having to do with stepping commands until
3664 that breakpoint is reached. */
3665 keep_going (ecs);
3666 return;
3667 }
3668
3669 if (ecs->event_thread->step_range_end == 0)
3670 {
3671 if (debug_infrun)
3672 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
3673 /* Likewise if we aren't even stepping. */
3674 keep_going (ecs);
3675 return;
3676 }
3677
3678 /* If stepping through a line, keep going if still within it.
3679
3680 Note that step_range_end is the address of the first instruction
3681 beyond the step range, and NOT the address of the last instruction
3682 within it!
3683
3684 Note also that during reverse execution, we may be stepping
3685 through a function epilogue and therefore must detect when
3686 the current-frame changes in the middle of a line. */
3687
3688 if (stop_pc >= ecs->event_thread->step_range_start
3689 && stop_pc < ecs->event_thread->step_range_end
3690 && (execution_direction != EXEC_REVERSE
3691 || frame_id_eq (get_frame_id (frame),
3692 ecs->event_thread->step_frame_id)))
3693 {
3694 if (debug_infrun)
3695 fprintf_unfiltered
3696 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
3697 paddress (gdbarch, ecs->event_thread->step_range_start),
3698 paddress (gdbarch, ecs->event_thread->step_range_end));
3699
3700 /* When stepping backward, stop at beginning of line range
3701 (unless it's the function entry point, in which case
3702 keep going back to the call point). */
3703 if (stop_pc == ecs->event_thread->step_range_start
3704 && stop_pc != ecs->stop_func_start
3705 && execution_direction == EXEC_REVERSE)
3706 {
3707 ecs->event_thread->stop_step = 1;
3708 print_stop_reason (END_STEPPING_RANGE, 0);
3709 stop_stepping (ecs);
3710 }
3711 else
3712 keep_going (ecs);
3713
3714 return;
3715 }
3716
3717 /* We stepped out of the stepping range. */
3718
3719 /* If we are stepping at the source level and entered the runtime
3720 loader dynamic symbol resolution code...
3721
3722 EXEC_FORWARD: we keep on single stepping until we exit the run
3723 time loader code and reach the callee's address.
3724
3725 EXEC_REVERSE: we've already executed the callee (backward), and
3726 the runtime loader code is handled just like any other
3727 undebuggable function call. Now we need only keep stepping
3728 backward through the trampoline code, and that's handled further
3729 down, so there is nothing for us to do here. */
3730
3731 if (execution_direction != EXEC_REVERSE
3732 && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
3733 && in_solib_dynsym_resolve_code (stop_pc))
3734 {
3735 CORE_ADDR pc_after_resolver =
3736 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
3737
3738 if (debug_infrun)
3739 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
3740
3741 if (pc_after_resolver)
3742 {
3743 /* Set up a step-resume breakpoint at the address
3744 indicated by SKIP_SOLIB_RESOLVER. */
3745 struct symtab_and_line sr_sal;
3746 init_sal (&sr_sal);
3747 sr_sal.pc = pc_after_resolver;
3748
3749 insert_step_resume_breakpoint_at_sal (gdbarch,
3750 sr_sal, null_frame_id);
3751 }
3752
3753 keep_going (ecs);
3754 return;
3755 }
3756
3757 if (ecs->event_thread->step_range_end != 1
3758 && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
3759 || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
3760 && get_frame_type (frame) == SIGTRAMP_FRAME)
3761 {
3762 if (debug_infrun)
3763 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
3764 /* The inferior, while doing a "step" or "next", has ended up in
3765 a signal trampoline (either by a signal being delivered or by
3766 the signal handler returning). Just single-step until the
3767 inferior leaves the trampoline (either by calling the handler
3768 or returning). */
3769 keep_going (ecs);
3770 return;
3771 }
3772
3773 /* Check for subroutine calls. The check for the current frame
3774 equalling the step ID is not necessary - the check of the
3775 previous frame's ID is sufficient - but it is a common case and
3776 cheaper than checking the previous frame's ID.
3777
3778 NOTE: frame_id_eq will never report two invalid frame IDs as
3779 being equal, so to get into this block, both the current and
3780 previous frame must have valid frame IDs. */
3781 if (!frame_id_eq (get_stack_frame_id (frame),
3782 ecs->event_thread->step_stack_frame_id)
3783 && frame_id_eq (frame_unwind_caller_id (frame),
3784 ecs->event_thread->step_stack_frame_id))
3785 {
3786 CORE_ADDR real_stop_pc;
3787
3788 if (debug_infrun)
3789 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
3790
3791 if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
3792 || ((ecs->event_thread->step_range_end == 1)
3793 && in_prologue (gdbarch, ecs->event_thread->prev_pc,
3794 ecs->stop_func_start)))
3795 {
3796 /* I presume that step_over_calls is only 0 when we're
3797 supposed to be stepping at the assembly language level
3798 ("stepi"). Just stop. */
3799 /* Also, maybe we just did a "nexti" inside a prolog, so we
3800 thought it was a subroutine call but it was not. Stop as
3801 well. FENN */
3802 /* And this works the same backward as frontward. MVS */
3803 ecs->event_thread->stop_step = 1;
3804 print_stop_reason (END_STEPPING_RANGE, 0);
3805 stop_stepping (ecs);
3806 return;
3807 }
3808
3809 /* Reverse stepping through solib trampolines. */
3810
3811 if (execution_direction == EXEC_REVERSE
3812 && ecs->event_thread->step_over_calls != STEP_OVER_NONE
3813 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
3814 || (ecs->stop_func_start == 0
3815 && in_solib_dynsym_resolve_code (stop_pc))))
3816 {
3817 /* Any solib trampoline code can be handled in reverse
3818 by simply continuing to single-step. We have already
3819 executed the solib function (backwards), and a few
3820 steps will take us back through the trampoline to the
3821 caller. */
3822 keep_going (ecs);
3823 return;
3824 }
3825
3826 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
3827 {
3828 /* We're doing a "next".
3829
3830 Normal (forward) execution: set a breakpoint at the
3831 callee's return address (the address at which the caller
3832 will resume).
3833
3834 Reverse (backward) execution. set the step-resume
3835 breakpoint at the start of the function that we just
3836 stepped into (backwards), and continue to there. When we
3837 get there, we'll need to single-step back to the caller. */
3838
3839 if (execution_direction == EXEC_REVERSE)
3840 {
3841 struct symtab_and_line sr_sal;
3842
3843 /* Normal function call return (static or dynamic). */
3844 init_sal (&sr_sal);
3845 sr_sal.pc = ecs->stop_func_start;
3846 insert_step_resume_breakpoint_at_sal (gdbarch,
3847 sr_sal, null_frame_id);
3848 }
3849 else
3850 insert_step_resume_breakpoint_at_caller (frame);
3851
3852 keep_going (ecs);
3853 return;
3854 }
3855
3856 /* If we are in a function call trampoline (a stub between the
3857 calling routine and the real function), locate the real
3858 function. That's what tells us (a) whether we want to step
3859 into it at all, and (b) what prologue we want to run to the
3860 end of, if we do step into it. */
3861 real_stop_pc = skip_language_trampoline (frame, stop_pc);
3862 if (real_stop_pc == 0)
3863 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
3864 if (real_stop_pc != 0)
3865 ecs->stop_func_start = real_stop_pc;
3866
3867 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
3868 {
3869 struct symtab_and_line sr_sal;
3870 init_sal (&sr_sal);
3871 sr_sal.pc = ecs->stop_func_start;
3872
3873 insert_step_resume_breakpoint_at_sal (gdbarch,
3874 sr_sal, null_frame_id);
3875 keep_going (ecs);
3876 return;
3877 }
3878
3879 /* If we have line number information for the function we are
3880 thinking of stepping into, step into it.
3881
3882 If there are several symtabs at that PC (e.g. with include
3883 files), just want to know whether *any* of them have line
3884 numbers. find_pc_line handles this. */
3885 {
3886 struct symtab_and_line tmp_sal;
3887
3888 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
3889 if (tmp_sal.line != 0)
3890 {
3891 if (execution_direction == EXEC_REVERSE)
3892 handle_step_into_function_backward (gdbarch, ecs);
3893 else
3894 handle_step_into_function (gdbarch, ecs);
3895 return;
3896 }
3897 }
3898
3899 /* If we have no line number and the step-stop-if-no-debug is
3900 set, we stop the step so that the user has a chance to switch
3901 in assembly mode. */
3902 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
3903 && step_stop_if_no_debug)
3904 {
3905 ecs->event_thread->stop_step = 1;
3906 print_stop_reason (END_STEPPING_RANGE, 0);
3907 stop_stepping (ecs);
3908 return;
3909 }
3910
3911 if (execution_direction == EXEC_REVERSE)
3912 {
3913 /* Set a breakpoint at callee's start address.
3914 From there we can step once and be back in the caller. */
3915 struct symtab_and_line sr_sal;
3916 init_sal (&sr_sal);
3917 sr_sal.pc = ecs->stop_func_start;
3918 insert_step_resume_breakpoint_at_sal (gdbarch,
3919 sr_sal, null_frame_id);
3920 }
3921 else
3922 /* Set a breakpoint at callee's return address (the address
3923 at which the caller will resume). */
3924 insert_step_resume_breakpoint_at_caller (frame);
3925
3926 keep_going (ecs);
3927 return;
3928 }
3929
3930 /* Reverse stepping through solib trampolines. */
3931
3932 if (execution_direction == EXEC_REVERSE
3933 && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
3934 {
3935 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
3936 || (ecs->stop_func_start == 0
3937 && in_solib_dynsym_resolve_code (stop_pc)))
3938 {
3939 /* Any solib trampoline code can be handled in reverse
3940 by simply continuing to single-step. We have already
3941 executed the solib function (backwards), and a few
3942 steps will take us back through the trampoline to the
3943 caller. */
3944 keep_going (ecs);
3945 return;
3946 }
3947 else if (in_solib_dynsym_resolve_code (stop_pc))
3948 {
3949 /* Stepped backward into the solib dynsym resolver.
3950 Set a breakpoint at its start and continue, then
3951 one more step will take us out. */
3952 struct symtab_and_line sr_sal;
3953 init_sal (&sr_sal);
3954 sr_sal.pc = ecs->stop_func_start;
3955 insert_step_resume_breakpoint_at_sal (gdbarch,
3956 sr_sal, null_frame_id);
3957 keep_going (ecs);
3958 return;
3959 }
3960 }
3961
3962 /* If we're in the return path from a shared library trampoline,
3963 we want to proceed through the trampoline when stepping. */
3964 if (gdbarch_in_solib_return_trampoline (gdbarch,
3965 stop_pc, ecs->stop_func_name))
3966 {
3967 /* Determine where this trampoline returns. */
3968 CORE_ADDR real_stop_pc;
3969 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
3970
3971 if (debug_infrun)
3972 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
3973
3974 /* Only proceed through if we know where it's going. */
3975 if (real_stop_pc)
3976 {
3977 /* And put the step-breakpoint there and go until there. */
3978 struct symtab_and_line sr_sal;
3979
3980 init_sal (&sr_sal); /* initialize to zeroes */
3981 sr_sal.pc = real_stop_pc;
3982 sr_sal.section = find_pc_overlay (sr_sal.pc);
3983
3984 /* Do not specify what the fp should be when we stop since
3985 on some machines the prologue is where the new fp value
3986 is established. */
3987 insert_step_resume_breakpoint_at_sal (gdbarch,
3988 sr_sal, null_frame_id);
3989
3990 /* Restart without fiddling with the step ranges or
3991 other state. */
3992 keep_going (ecs);
3993 return;
3994 }
3995 }
3996
3997 stop_pc_sal = find_pc_line (stop_pc, 0);
3998
3999 /* NOTE: tausq/2004-05-24: This if block used to be done before all
4000 the trampoline processing logic, however, there are some trampolines
4001 that have no names, so we should do trampoline handling first. */
4002 if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4003 && ecs->stop_func_name == NULL
4004 && stop_pc_sal.line == 0)
4005 {
4006 if (debug_infrun)
4007 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4008
4009 /* The inferior just stepped into, or returned to, an
4010 undebuggable function (where there is no debugging information
4011 and no line number corresponding to the address where the
4012 inferior stopped). Since we want to skip this kind of code,
4013 we keep going until the inferior returns from this
4014 function - unless the user has asked us not to (via
4015 set step-mode) or we no longer know how to get back
4016 to the call site. */
4017 if (step_stop_if_no_debug
4018 || !frame_id_p (frame_unwind_caller_id (frame)))
4019 {
4020 /* If we have no line number and the step-stop-if-no-debug
4021 is set, we stop the step so that the user has a chance to
4022 switch in assembly mode. */
4023 ecs->event_thread->stop_step = 1;
4024 print_stop_reason (END_STEPPING_RANGE, 0);
4025 stop_stepping (ecs);
4026 return;
4027 }
4028 else
4029 {
4030 /* Set a breakpoint at callee's return address (the address
4031 at which the caller will resume). */
4032 insert_step_resume_breakpoint_at_caller (frame);
4033 keep_going (ecs);
4034 return;
4035 }
4036 }
4037
4038 if (ecs->event_thread->step_range_end == 1)
4039 {
4040 /* It is stepi or nexti. We always want to stop stepping after
4041 one instruction. */
4042 if (debug_infrun)
4043 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4044 ecs->event_thread->stop_step = 1;
4045 print_stop_reason (END_STEPPING_RANGE, 0);
4046 stop_stepping (ecs);
4047 return;
4048 }
4049
4050 if (stop_pc_sal.line == 0)
4051 {
4052 /* We have no line number information. That means to stop
4053 stepping (does this always happen right after one instruction,
4054 when we do "s" in a function with no line numbers,
4055 or can this happen as a result of a return or longjmp?). */
4056 if (debug_infrun)
4057 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4058 ecs->event_thread->stop_step = 1;
4059 print_stop_reason (END_STEPPING_RANGE, 0);
4060 stop_stepping (ecs);
4061 return;
4062 }
4063
4064 /* Look for "calls" to inlined functions, part one. If the inline
4065 frame machinery detected some skipped call sites, we have entered
4066 a new inline function. */
4067
4068 if (frame_id_eq (get_frame_id (get_current_frame ()),
4069 ecs->event_thread->step_frame_id)
4070 && inline_skipped_frames (ecs->ptid))
4071 {
4072 struct symtab_and_line call_sal;
4073
4074 if (debug_infrun)
4075 fprintf_unfiltered (gdb_stdlog,
4076 "infrun: stepped into inlined function\n");
4077
4078 find_frame_sal (get_current_frame (), &call_sal);
4079
4080 if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4081 {
4082 /* For "step", we're going to stop. But if the call site
4083 for this inlined function is on the same source line as
4084 we were previously stepping, go down into the function
4085 first. Otherwise stop at the call site. */
4086
4087 if (call_sal.line == ecs->event_thread->current_line
4088 && call_sal.symtab == ecs->event_thread->current_symtab)
4089 step_into_inline_frame (ecs->ptid);
4090
4091 ecs->event_thread->stop_step = 1;
4092 print_stop_reason (END_STEPPING_RANGE, 0);
4093 stop_stepping (ecs);
4094 return;
4095 }
4096 else
4097 {
4098 /* For "next", we should stop at the call site if it is on a
4099 different source line. Otherwise continue through the
4100 inlined function. */
4101 if (call_sal.line == ecs->event_thread->current_line
4102 && call_sal.symtab == ecs->event_thread->current_symtab)
4103 keep_going (ecs);
4104 else
4105 {
4106 ecs->event_thread->stop_step = 1;
4107 print_stop_reason (END_STEPPING_RANGE, 0);
4108 stop_stepping (ecs);
4109 }
4110 return;
4111 }
4112 }
4113
4114 /* Look for "calls" to inlined functions, part two. If we are still
4115 in the same real function we were stepping through, but we have
4116 to go further up to find the exact frame ID, we are stepping
4117 through a more inlined call beyond its call site. */
4118
4119 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4120 && !frame_id_eq (get_frame_id (get_current_frame ()),
4121 ecs->event_thread->step_frame_id)
4122 && stepped_in_from (get_current_frame (),
4123 ecs->event_thread->step_frame_id))
4124 {
4125 if (debug_infrun)
4126 fprintf_unfiltered (gdb_stdlog,
4127 "infrun: stepping through inlined function\n");
4128
4129 if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4130 keep_going (ecs);
4131 else
4132 {
4133 ecs->event_thread->stop_step = 1;
4134 print_stop_reason (END_STEPPING_RANGE, 0);
4135 stop_stepping (ecs);
4136 }
4137 return;
4138 }
4139
4140 if ((stop_pc == stop_pc_sal.pc)
4141 && (ecs->event_thread->current_line != stop_pc_sal.line
4142 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4143 {
4144 /* We are at the start of a different line. So stop. Note that
4145 we don't stop if we step into the middle of a different line.
4146 That is said to make things like for (;;) statements work
4147 better. */
4148 if (debug_infrun)
4149 fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4150 ecs->event_thread->stop_step = 1;
4151 print_stop_reason (END_STEPPING_RANGE, 0);
4152 stop_stepping (ecs);
4153 return;
4154 }
4155
4156 /* We aren't done stepping.
4157
4158 Optimize by setting the stepping range to the line.
4159 (We might not be in the original line, but if we entered a
4160 new line in mid-statement, we continue stepping. This makes
4161 things like for(;;) statements work better.) */
4162
4163 ecs->event_thread->step_range_start = stop_pc_sal.pc;
4164 ecs->event_thread->step_range_end = stop_pc_sal.end;
4165 set_step_info (frame, stop_pc_sal);
4166
4167 if (debug_infrun)
4168 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4169 keep_going (ecs);
4170 }
4171
4172 /* Is thread TP in the middle of single-stepping? */
4173
4174 static int
4175 currently_stepping (struct thread_info *tp)
4176 {
4177 return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4178 || tp->trap_expected
4179 || tp->stepping_through_solib_after_catch
4180 || bpstat_should_step ());
4181 }
4182
4183 /* Returns true if any thread *but* the one passed in "data" is in the
4184 middle of stepping or of handling a "next". */
4185
4186 static int
4187 currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4188 {
4189 if (tp == data)
4190 return 0;
4191
4192 return (tp->step_range_end
4193 || tp->trap_expected
4194 || tp->stepping_through_solib_after_catch);
4195 }
4196
4197 /* Inferior has stepped into a subroutine call with source code that
4198 we should not step over. Do step to the first line of code in
4199 it. */
4200
4201 static void
4202 handle_step_into_function (struct gdbarch *gdbarch,
4203 struct execution_control_state *ecs)
4204 {
4205 struct symtab *s;
4206 struct symtab_and_line stop_func_sal, sr_sal;
4207
4208 s = find_pc_symtab (stop_pc);
4209 if (s && s->language != language_asm)
4210 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4211 ecs->stop_func_start);
4212
4213 stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4214 /* Use the step_resume_break to step until the end of the prologue,
4215 even if that involves jumps (as it seems to on the vax under
4216 4.2). */
4217 /* If the prologue ends in the middle of a source line, continue to
4218 the end of that source line (if it is still within the function).
4219 Otherwise, just go to end of prologue. */
4220 if (stop_func_sal.end
4221 && stop_func_sal.pc != ecs->stop_func_start
4222 && stop_func_sal.end < ecs->stop_func_end)
4223 ecs->stop_func_start = stop_func_sal.end;
4224
4225 /* Architectures which require breakpoint adjustment might not be able
4226 to place a breakpoint at the computed address. If so, the test
4227 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
4228 ecs->stop_func_start to an address at which a breakpoint may be
4229 legitimately placed.
4230
4231 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
4232 made, GDB will enter an infinite loop when stepping through
4233 optimized code consisting of VLIW instructions which contain
4234 subinstructions corresponding to different source lines. On
4235 FR-V, it's not permitted to place a breakpoint on any but the
4236 first subinstruction of a VLIW instruction. When a breakpoint is
4237 set, GDB will adjust the breakpoint address to the beginning of
4238 the VLIW instruction. Thus, we need to make the corresponding
4239 adjustment here when computing the stop address. */
4240
4241 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4242 {
4243 ecs->stop_func_start
4244 = gdbarch_adjust_breakpoint_address (gdbarch,
4245 ecs->stop_func_start);
4246 }
4247
4248 if (ecs->stop_func_start == stop_pc)
4249 {
4250 /* We are already there: stop now. */
4251 ecs->event_thread->stop_step = 1;
4252 print_stop_reason (END_STEPPING_RANGE, 0);
4253 stop_stepping (ecs);
4254 return;
4255 }
4256 else
4257 {
4258 /* Put the step-breakpoint there and go until there. */
4259 init_sal (&sr_sal); /* initialize to zeroes */
4260 sr_sal.pc = ecs->stop_func_start;
4261 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4262
4263 /* Do not specify what the fp should be when we stop since on
4264 some machines the prologue is where the new fp value is
4265 established. */
4266 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4267
4268 /* And make sure stepping stops right away then. */
4269 ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4270 }
4271 keep_going (ecs);
4272 }
4273
4274 /* Inferior has stepped backward into a subroutine call with source
4275 code that we should not step over. Do step to the beginning of the
4276 last line of code in it. */
4277
4278 static void
4279 handle_step_into_function_backward (struct gdbarch *gdbarch,
4280 struct execution_control_state *ecs)
4281 {
4282 struct symtab *s;
4283 struct symtab_and_line stop_func_sal, sr_sal;
4284
4285 s = find_pc_symtab (stop_pc);
4286 if (s && s->language != language_asm)
4287 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4288 ecs->stop_func_start);
4289
4290 stop_func_sal = find_pc_line (stop_pc, 0);
4291
4292 /* OK, we're just going to keep stepping here. */
4293 if (stop_func_sal.pc == stop_pc)
4294 {
4295 /* We're there already. Just stop stepping now. */
4296 ecs->event_thread->stop_step = 1;
4297 print_stop_reason (END_STEPPING_RANGE, 0);
4298 stop_stepping (ecs);
4299 }
4300 else
4301 {
4302 /* Else just reset the step range and keep going.
4303 No step-resume breakpoint, they don't work for
4304 epilogues, which can have multiple entry paths. */
4305 ecs->event_thread->step_range_start = stop_func_sal.pc;
4306 ecs->event_thread->step_range_end = stop_func_sal.end;
4307 keep_going (ecs);
4308 }
4309 return;
4310 }
4311
4312 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4313 This is used to both functions and to skip over code. */
4314
4315 static void
4316 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4317 struct symtab_and_line sr_sal,
4318 struct frame_id sr_id)
4319 {
4320 /* There should never be more than one step-resume or longjmp-resume
4321 breakpoint per thread, so we should never be setting a new
4322 step_resume_breakpoint when one is already active. */
4323 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4324
4325 if (debug_infrun)
4326 fprintf_unfiltered (gdb_stdlog,
4327 "infrun: inserting step-resume breakpoint at %s\n",
4328 paddress (gdbarch, sr_sal.pc));
4329
4330 inferior_thread ()->step_resume_breakpoint
4331 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4332 }
4333
4334 /* Insert a "step-resume breakpoint" at RETURN_FRAME.pc. This is used
4335 to skip a potential signal handler.
4336
4337 This is called with the interrupted function's frame. The signal
4338 handler, when it returns, will resume the interrupted function at
4339 RETURN_FRAME.pc. */
4340
4341 static void
4342 insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4343 {
4344 struct symtab_and_line sr_sal;
4345 struct gdbarch *gdbarch;
4346
4347 gdb_assert (return_frame != NULL);
4348 init_sal (&sr_sal); /* initialize to zeros */
4349
4350 gdbarch = get_frame_arch (return_frame);
4351 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
4352 sr_sal.section = find_pc_overlay (sr_sal.pc);
4353
4354 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4355 get_stack_frame_id (return_frame));
4356 }
4357
4358 /* Similar to insert_step_resume_breakpoint_at_frame, except
4359 but a breakpoint at the previous frame's PC. This is used to
4360 skip a function after stepping into it (for "next" or if the called
4361 function has no debugging information).
4362
4363 The current function has almost always been reached by single
4364 stepping a call or return instruction. NEXT_FRAME belongs to the
4365 current function, and the breakpoint will be set at the caller's
4366 resume address.
4367
4368 This is a separate function rather than reusing
4369 insert_step_resume_breakpoint_at_frame in order to avoid
4370 get_prev_frame, which may stop prematurely (see the implementation
4371 of frame_unwind_caller_id for an example). */
4372
4373 static void
4374 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
4375 {
4376 struct symtab_and_line sr_sal;
4377 struct gdbarch *gdbarch;
4378
4379 /* We shouldn't have gotten here if we don't know where the call site
4380 is. */
4381 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
4382
4383 init_sal (&sr_sal); /* initialize to zeros */
4384
4385 gdbarch = frame_unwind_caller_arch (next_frame);
4386 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
4387 frame_unwind_caller_pc (next_frame));
4388 sr_sal.section = find_pc_overlay (sr_sal.pc);
4389
4390 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
4391 frame_unwind_caller_id (next_frame));
4392 }
4393
4394 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
4395 new breakpoint at the target of a jmp_buf. The handling of
4396 longjmp-resume uses the same mechanisms used for handling
4397 "step-resume" breakpoints. */
4398
4399 static void
4400 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
4401 {
4402 /* There should never be more than one step-resume or longjmp-resume
4403 breakpoint per thread, so we should never be setting a new
4404 longjmp_resume_breakpoint when one is already active. */
4405 gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4406
4407 if (debug_infrun)
4408 fprintf_unfiltered (gdb_stdlog,
4409 "infrun: inserting longjmp-resume breakpoint at %s\n",
4410 paddress (gdbarch, pc));
4411
4412 inferior_thread ()->step_resume_breakpoint =
4413 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
4414 }
4415
4416 static void
4417 stop_stepping (struct execution_control_state *ecs)
4418 {
4419 if (debug_infrun)
4420 fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
4421
4422 /* Let callers know we don't want to wait for the inferior anymore. */
4423 ecs->wait_some_more = 0;
4424 }
4425
4426 /* This function handles various cases where we need to continue
4427 waiting for the inferior. */
4428 /* (Used to be the keep_going: label in the old wait_for_inferior) */
4429
4430 static void
4431 keep_going (struct execution_control_state *ecs)
4432 {
4433 /* Save the pc before execution, to compare with pc after stop. */
4434 ecs->event_thread->prev_pc
4435 = regcache_read_pc (get_thread_regcache (ecs->ptid));
4436
4437 /* If we did not do break;, it means we should keep running the
4438 inferior and not return to debugger. */
4439
4440 if (ecs->event_thread->trap_expected
4441 && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4442 {
4443 /* We took a signal (which we are supposed to pass through to
4444 the inferior, else we'd not get here) and we haven't yet
4445 gotten our trap. Simply continue. */
4446 resume (currently_stepping (ecs->event_thread),
4447 ecs->event_thread->stop_signal);
4448 }
4449 else
4450 {
4451 /* Either the trap was not expected, but we are continuing
4452 anyway (the user asked that this signal be passed to the
4453 child)
4454 -- or --
4455 The signal was SIGTRAP, e.g. it was our signal, but we
4456 decided we should resume from it.
4457
4458 We're going to run this baby now!
4459
4460 Note that insert_breakpoints won't try to re-insert
4461 already inserted breakpoints. Therefore, we don't
4462 care if breakpoints were already inserted, or not. */
4463
4464 if (ecs->event_thread->stepping_over_breakpoint)
4465 {
4466 struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
4467 if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
4468 /* Since we can't do a displaced step, we have to remove
4469 the breakpoint while we step it. To keep things
4470 simple, we remove them all. */
4471 remove_breakpoints ();
4472 }
4473 else
4474 {
4475 struct gdb_exception e;
4476 /* Stop stepping when inserting breakpoints
4477 has failed. */
4478 TRY_CATCH (e, RETURN_MASK_ERROR)
4479 {
4480 insert_breakpoints ();
4481 }
4482 if (e.reason < 0)
4483 {
4484 stop_stepping (ecs);
4485 return;
4486 }
4487 }
4488
4489 ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
4490
4491 /* Do not deliver SIGNAL_TRAP (except when the user explicitly
4492 specifies that such a signal should be delivered to the
4493 target program).
4494
4495 Typically, this would occure when a user is debugging a
4496 target monitor on a simulator: the target monitor sets a
4497 breakpoint; the simulator encounters this break-point and
4498 halts the simulation handing control to GDB; GDB, noteing
4499 that the break-point isn't valid, returns control back to the
4500 simulator; the simulator then delivers the hardware
4501 equivalent of a SIGNAL_TRAP to the program being debugged. */
4502
4503 if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
4504 && !signal_program[ecs->event_thread->stop_signal])
4505 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
4506
4507 resume (currently_stepping (ecs->event_thread),
4508 ecs->event_thread->stop_signal);
4509 }
4510
4511 prepare_to_wait (ecs);
4512 }
4513
4514 /* This function normally comes after a resume, before
4515 handle_inferior_event exits. It takes care of any last bits of
4516 housekeeping, and sets the all-important wait_some_more flag. */
4517
4518 static void
4519 prepare_to_wait (struct execution_control_state *ecs)
4520 {
4521 if (debug_infrun)
4522 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
4523
4524 /* This is the old end of the while loop. Let everybody know we
4525 want to wait for the inferior some more and get called again
4526 soon. */
4527 ecs->wait_some_more = 1;
4528 }
4529
4530 /* Print why the inferior has stopped. We always print something when
4531 the inferior exits, or receives a signal. The rest of the cases are
4532 dealt with later on in normal_stop() and print_it_typical(). Ideally
4533 there should be a call to this function from handle_inferior_event()
4534 each time stop_stepping() is called.*/
4535 static void
4536 print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
4537 {
4538 switch (stop_reason)
4539 {
4540 case END_STEPPING_RANGE:
4541 /* We are done with a step/next/si/ni command. */
4542 /* For now print nothing. */
4543 /* Print a message only if not in the middle of doing a "step n"
4544 operation for n > 1 */
4545 if (!inferior_thread ()->step_multi
4546 || !inferior_thread ()->stop_step)
4547 if (ui_out_is_mi_like_p (uiout))
4548 ui_out_field_string
4549 (uiout, "reason",
4550 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
4551 break;
4552 case SIGNAL_EXITED:
4553 /* The inferior was terminated by a signal. */
4554 annotate_signalled ();
4555 if (ui_out_is_mi_like_p (uiout))
4556 ui_out_field_string
4557 (uiout, "reason",
4558 async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
4559 ui_out_text (uiout, "\nProgram terminated with signal ");
4560 annotate_signal_name ();
4561 ui_out_field_string (uiout, "signal-name",
4562 target_signal_to_name (stop_info));
4563 annotate_signal_name_end ();
4564 ui_out_text (uiout, ", ");
4565 annotate_signal_string ();
4566 ui_out_field_string (uiout, "signal-meaning",
4567 target_signal_to_string (stop_info));
4568 annotate_signal_string_end ();
4569 ui_out_text (uiout, ".\n");
4570 ui_out_text (uiout, "The program no longer exists.\n");
4571 break;
4572 case EXITED:
4573 /* The inferior program is finished. */
4574 annotate_exited (stop_info);
4575 if (stop_info)
4576 {
4577 if (ui_out_is_mi_like_p (uiout))
4578 ui_out_field_string (uiout, "reason",
4579 async_reason_lookup (EXEC_ASYNC_EXITED));
4580 ui_out_text (uiout, "\nProgram exited with code ");
4581 ui_out_field_fmt (uiout, "exit-code", "0%o",
4582 (unsigned int) stop_info);
4583 ui_out_text (uiout, ".\n");
4584 }
4585 else
4586 {
4587 if (ui_out_is_mi_like_p (uiout))
4588 ui_out_field_string
4589 (uiout, "reason",
4590 async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
4591 ui_out_text (uiout, "\nProgram exited normally.\n");
4592 }
4593 /* Support the --return-child-result option. */
4594 return_child_result_value = stop_info;
4595 break;
4596 case SIGNAL_RECEIVED:
4597 /* Signal received. The signal table tells us to print about
4598 it. */
4599 annotate_signal ();
4600
4601 if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
4602 {
4603 struct thread_info *t = inferior_thread ();
4604
4605 ui_out_text (uiout, "\n[");
4606 ui_out_field_string (uiout, "thread-name",
4607 target_pid_to_str (t->ptid));
4608 ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
4609 ui_out_text (uiout, " stopped");
4610 }
4611 else
4612 {
4613 ui_out_text (uiout, "\nProgram received signal ");
4614 annotate_signal_name ();
4615 if (ui_out_is_mi_like_p (uiout))
4616 ui_out_field_string
4617 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
4618 ui_out_field_string (uiout, "signal-name",
4619 target_signal_to_name (stop_info));
4620 annotate_signal_name_end ();
4621 ui_out_text (uiout, ", ");
4622 annotate_signal_string ();
4623 ui_out_field_string (uiout, "signal-meaning",
4624 target_signal_to_string (stop_info));
4625 annotate_signal_string_end ();
4626 }
4627 ui_out_text (uiout, ".\n");
4628 break;
4629 case NO_HISTORY:
4630 /* Reverse execution: target ran out of history info. */
4631 ui_out_text (uiout, "\nNo more reverse-execution history.\n");
4632 break;
4633 default:
4634 internal_error (__FILE__, __LINE__,
4635 _("print_stop_reason: unrecognized enum value"));
4636 break;
4637 }
4638 }
4639 \f
4640
4641 /* Here to return control to GDB when the inferior stops for real.
4642 Print appropriate messages, remove breakpoints, give terminal our modes.
4643
4644 STOP_PRINT_FRAME nonzero means print the executing frame
4645 (pc, function, args, file, line number and line text).
4646 BREAKPOINTS_FAILED nonzero means stop was due to error
4647 attempting to insert breakpoints. */
4648
4649 void
4650 normal_stop (void)
4651 {
4652 struct target_waitstatus last;
4653 ptid_t last_ptid;
4654 struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
4655
4656 get_last_target_status (&last_ptid, &last);
4657
4658 /* If an exception is thrown from this point on, make sure to
4659 propagate GDB's knowledge of the executing state to the
4660 frontend/user running state. A QUIT is an easy exception to see
4661 here, so do this before any filtered output. */
4662 if (!non_stop)
4663 make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
4664 else if (last.kind != TARGET_WAITKIND_SIGNALLED
4665 && last.kind != TARGET_WAITKIND_EXITED)
4666 make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
4667
4668 /* In non-stop mode, we don't want GDB to switch threads behind the
4669 user's back, to avoid races where the user is typing a command to
4670 apply to thread x, but GDB switches to thread y before the user
4671 finishes entering the command. */
4672
4673 /* As with the notification of thread events, we want to delay
4674 notifying the user that we've switched thread context until
4675 the inferior actually stops.
4676
4677 There's no point in saying anything if the inferior has exited.
4678 Note that SIGNALLED here means "exited with a signal", not
4679 "received a signal". */
4680 if (!non_stop
4681 && !ptid_equal (previous_inferior_ptid, inferior_ptid)
4682 && target_has_execution
4683 && last.kind != TARGET_WAITKIND_SIGNALLED
4684 && last.kind != TARGET_WAITKIND_EXITED)
4685 {
4686 target_terminal_ours_for_output ();
4687 printf_filtered (_("[Switching to %s]\n"),
4688 target_pid_to_str (inferior_ptid));
4689 annotate_thread_changed ();
4690 previous_inferior_ptid = inferior_ptid;
4691 }
4692
4693 if (!breakpoints_always_inserted_mode () && target_has_execution)
4694 {
4695 if (remove_breakpoints ())
4696 {
4697 target_terminal_ours_for_output ();
4698 printf_filtered (_("\
4699 Cannot remove breakpoints because program is no longer writable.\n\
4700 Further execution is probably impossible.\n"));
4701 }
4702 }
4703
4704 /* If an auto-display called a function and that got a signal,
4705 delete that auto-display to avoid an infinite recursion. */
4706
4707 if (stopped_by_random_signal)
4708 disable_current_display ();
4709
4710 /* Don't print a message if in the middle of doing a "step n"
4711 operation for n > 1 */
4712 if (target_has_execution
4713 && last.kind != TARGET_WAITKIND_SIGNALLED
4714 && last.kind != TARGET_WAITKIND_EXITED
4715 && inferior_thread ()->step_multi
4716 && inferior_thread ()->stop_step)
4717 goto done;
4718
4719 target_terminal_ours ();
4720
4721 /* Set the current source location. This will also happen if we
4722 display the frame below, but the current SAL will be incorrect
4723 during a user hook-stop function. */
4724 if (has_stack_frames () && !stop_stack_dummy)
4725 set_current_sal_from_frame (get_current_frame (), 1);
4726
4727 /* Let the user/frontend see the threads as stopped. */
4728 do_cleanups (old_chain);
4729
4730 /* Look up the hook_stop and run it (CLI internally handles problem
4731 of stop_command's pre-hook not existing). */
4732 if (stop_command)
4733 catch_errors (hook_stop_stub, stop_command,
4734 "Error while running hook_stop:\n", RETURN_MASK_ALL);
4735
4736 if (!has_stack_frames ())
4737 goto done;
4738
4739 if (last.kind == TARGET_WAITKIND_SIGNALLED
4740 || last.kind == TARGET_WAITKIND_EXITED)
4741 goto done;
4742
4743 /* Select innermost stack frame - i.e., current frame is frame 0,
4744 and current location is based on that.
4745 Don't do this on return from a stack dummy routine,
4746 or if the program has exited. */
4747
4748 if (!stop_stack_dummy)
4749 {
4750 select_frame (get_current_frame ());
4751
4752 /* Print current location without a level number, if
4753 we have changed functions or hit a breakpoint.
4754 Print source line if we have one.
4755 bpstat_print() contains the logic deciding in detail
4756 what to print, based on the event(s) that just occurred. */
4757
4758 /* If --batch-silent is enabled then there's no need to print the current
4759 source location, and to try risks causing an error message about
4760 missing source files. */
4761 if (stop_print_frame && !batch_silent)
4762 {
4763 int bpstat_ret;
4764 int source_flag;
4765 int do_frame_printing = 1;
4766 struct thread_info *tp = inferior_thread ();
4767
4768 bpstat_ret = bpstat_print (tp->stop_bpstat);
4769 switch (bpstat_ret)
4770 {
4771 case PRINT_UNKNOWN:
4772 /* If we had hit a shared library event breakpoint,
4773 bpstat_print would print out this message. If we hit
4774 an OS-level shared library event, do the same
4775 thing. */
4776 if (last.kind == TARGET_WAITKIND_LOADED)
4777 {
4778 printf_filtered (_("Stopped due to shared library event\n"));
4779 source_flag = SRC_LINE; /* something bogus */
4780 do_frame_printing = 0;
4781 break;
4782 }
4783
4784 /* FIXME: cagney/2002-12-01: Given that a frame ID does
4785 (or should) carry around the function and does (or
4786 should) use that when doing a frame comparison. */
4787 if (tp->stop_step
4788 && frame_id_eq (tp->step_frame_id,
4789 get_frame_id (get_current_frame ()))
4790 && step_start_function == find_pc_function (stop_pc))
4791 source_flag = SRC_LINE; /* finished step, just print source line */
4792 else
4793 source_flag = SRC_AND_LOC; /* print location and source line */
4794 break;
4795 case PRINT_SRC_AND_LOC:
4796 source_flag = SRC_AND_LOC; /* print location and source line */
4797 break;
4798 case PRINT_SRC_ONLY:
4799 source_flag = SRC_LINE;
4800 break;
4801 case PRINT_NOTHING:
4802 source_flag = SRC_LINE; /* something bogus */
4803 do_frame_printing = 0;
4804 break;
4805 default:
4806 internal_error (__FILE__, __LINE__, _("Unknown value."));
4807 }
4808
4809 /* The behavior of this routine with respect to the source
4810 flag is:
4811 SRC_LINE: Print only source line
4812 LOCATION: Print only location
4813 SRC_AND_LOC: Print location and source line */
4814 if (do_frame_printing)
4815 print_stack_frame (get_selected_frame (NULL), 0, source_flag);
4816
4817 /* Display the auto-display expressions. */
4818 do_displays ();
4819 }
4820 }
4821
4822 /* Save the function value return registers, if we care.
4823 We might be about to restore their previous contents. */
4824 if (inferior_thread ()->proceed_to_finish)
4825 {
4826 /* This should not be necessary. */
4827 if (stop_registers)
4828 regcache_xfree (stop_registers);
4829
4830 /* NB: The copy goes through to the target picking up the value of
4831 all the registers. */
4832 stop_registers = regcache_dup (get_current_regcache ());
4833 }
4834
4835 if (stop_stack_dummy)
4836 {
4837 /* Pop the empty frame that contains the stack dummy.
4838 This also restores inferior state prior to the call
4839 (struct inferior_thread_state). */
4840 struct frame_info *frame = get_current_frame ();
4841 gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
4842 frame_pop (frame);
4843 /* frame_pop() calls reinit_frame_cache as the last thing it does
4844 which means there's currently no selected frame. We don't need
4845 to re-establish a selected frame if the dummy call returns normally,
4846 that will be done by restore_inferior_status. However, we do have
4847 to handle the case where the dummy call is returning after being
4848 stopped (e.g. the dummy call previously hit a breakpoint). We
4849 can't know which case we have so just always re-establish a
4850 selected frame here. */
4851 select_frame (get_current_frame ());
4852 }
4853
4854 done:
4855 annotate_stopped ();
4856
4857 /* Suppress the stop observer if we're in the middle of:
4858
4859 - a step n (n > 1), as there still more steps to be done.
4860
4861 - a "finish" command, as the observer will be called in
4862 finish_command_continuation, so it can include the inferior
4863 function's return value.
4864
4865 - calling an inferior function, as we pretend we inferior didn't
4866 run at all. The return value of the call is handled by the
4867 expression evaluator, through call_function_by_hand. */
4868
4869 if (!target_has_execution
4870 || last.kind == TARGET_WAITKIND_SIGNALLED
4871 || last.kind == TARGET_WAITKIND_EXITED
4872 || (!inferior_thread ()->step_multi
4873 && !(inferior_thread ()->stop_bpstat
4874 && inferior_thread ()->proceed_to_finish)
4875 && !inferior_thread ()->in_infcall))
4876 {
4877 if (!ptid_equal (inferior_ptid, null_ptid))
4878 observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
4879 stop_print_frame);
4880 else
4881 observer_notify_normal_stop (NULL, stop_print_frame);
4882 }
4883
4884 if (target_has_execution)
4885 {
4886 if (last.kind != TARGET_WAITKIND_SIGNALLED
4887 && last.kind != TARGET_WAITKIND_EXITED)
4888 /* Delete the breakpoint we stopped at, if it wants to be deleted.
4889 Delete any breakpoint that is to be deleted at the next stop. */
4890 breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
4891 }
4892 }
4893
4894 static int
4895 hook_stop_stub (void *cmd)
4896 {
4897 execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
4898 return (0);
4899 }
4900 \f
4901 int
4902 signal_stop_state (int signo)
4903 {
4904 return signal_stop[signo];
4905 }
4906
4907 int
4908 signal_print_state (int signo)
4909 {
4910 return signal_print[signo];
4911 }
4912
4913 int
4914 signal_pass_state (int signo)
4915 {
4916 return signal_program[signo];
4917 }
4918
4919 int
4920 signal_stop_update (int signo, int state)
4921 {
4922 int ret = signal_stop[signo];
4923 signal_stop[signo] = state;
4924 return ret;
4925 }
4926
4927 int
4928 signal_print_update (int signo, int state)
4929 {
4930 int ret = signal_print[signo];
4931 signal_print[signo] = state;
4932 return ret;
4933 }
4934
4935 int
4936 signal_pass_update (int signo, int state)
4937 {
4938 int ret = signal_program[signo];
4939 signal_program[signo] = state;
4940 return ret;
4941 }
4942
4943 static void
4944 sig_print_header (void)
4945 {
4946 printf_filtered (_("\
4947 Signal Stop\tPrint\tPass to program\tDescription\n"));
4948 }
4949
4950 static void
4951 sig_print_info (enum target_signal oursig)
4952 {
4953 const char *name = target_signal_to_name (oursig);
4954 int name_padding = 13 - strlen (name);
4955
4956 if (name_padding <= 0)
4957 name_padding = 0;
4958
4959 printf_filtered ("%s", name);
4960 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
4961 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
4962 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
4963 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
4964 printf_filtered ("%s\n", target_signal_to_string (oursig));
4965 }
4966
4967 /* Specify how various signals in the inferior should be handled. */
4968
4969 static void
4970 handle_command (char *args, int from_tty)
4971 {
4972 char **argv;
4973 int digits, wordlen;
4974 int sigfirst, signum, siglast;
4975 enum target_signal oursig;
4976 int allsigs;
4977 int nsigs;
4978 unsigned char *sigs;
4979 struct cleanup *old_chain;
4980
4981 if (args == NULL)
4982 {
4983 error_no_arg (_("signal to handle"));
4984 }
4985
4986 /* Allocate and zero an array of flags for which signals to handle. */
4987
4988 nsigs = (int) TARGET_SIGNAL_LAST;
4989 sigs = (unsigned char *) alloca (nsigs);
4990 memset (sigs, 0, nsigs);
4991
4992 /* Break the command line up into args. */
4993
4994 argv = gdb_buildargv (args);
4995 old_chain = make_cleanup_freeargv (argv);
4996
4997 /* Walk through the args, looking for signal oursigs, signal names, and
4998 actions. Signal numbers and signal names may be interspersed with
4999 actions, with the actions being performed for all signals cumulatively
5000 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
5001
5002 while (*argv != NULL)
5003 {
5004 wordlen = strlen (*argv);
5005 for (digits = 0; isdigit ((*argv)[digits]); digits++)
5006 {;
5007 }
5008 allsigs = 0;
5009 sigfirst = siglast = -1;
5010
5011 if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5012 {
5013 /* Apply action to all signals except those used by the
5014 debugger. Silently skip those. */
5015 allsigs = 1;
5016 sigfirst = 0;
5017 siglast = nsigs - 1;
5018 }
5019 else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5020 {
5021 SET_SIGS (nsigs, sigs, signal_stop);
5022 SET_SIGS (nsigs, sigs, signal_print);
5023 }
5024 else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5025 {
5026 UNSET_SIGS (nsigs, sigs, signal_program);
5027 }
5028 else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5029 {
5030 SET_SIGS (nsigs, sigs, signal_print);
5031 }
5032 else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5033 {
5034 SET_SIGS (nsigs, sigs, signal_program);
5035 }
5036 else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5037 {
5038 UNSET_SIGS (nsigs, sigs, signal_stop);
5039 }
5040 else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5041 {
5042 SET_SIGS (nsigs, sigs, signal_program);
5043 }
5044 else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5045 {
5046 UNSET_SIGS (nsigs, sigs, signal_print);
5047 UNSET_SIGS (nsigs, sigs, signal_stop);
5048 }
5049 else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5050 {
5051 UNSET_SIGS (nsigs, sigs, signal_program);
5052 }
5053 else if (digits > 0)
5054 {
5055 /* It is numeric. The numeric signal refers to our own
5056 internal signal numbering from target.h, not to host/target
5057 signal number. This is a feature; users really should be
5058 using symbolic names anyway, and the common ones like
5059 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
5060
5061 sigfirst = siglast = (int)
5062 target_signal_from_command (atoi (*argv));
5063 if ((*argv)[digits] == '-')
5064 {
5065 siglast = (int)
5066 target_signal_from_command (atoi ((*argv) + digits + 1));
5067 }
5068 if (sigfirst > siglast)
5069 {
5070 /* Bet he didn't figure we'd think of this case... */
5071 signum = sigfirst;
5072 sigfirst = siglast;
5073 siglast = signum;
5074 }
5075 }
5076 else
5077 {
5078 oursig = target_signal_from_name (*argv);
5079 if (oursig != TARGET_SIGNAL_UNKNOWN)
5080 {
5081 sigfirst = siglast = (int) oursig;
5082 }
5083 else
5084 {
5085 /* Not a number and not a recognized flag word => complain. */
5086 error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5087 }
5088 }
5089
5090 /* If any signal numbers or symbol names were found, set flags for
5091 which signals to apply actions to. */
5092
5093 for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5094 {
5095 switch ((enum target_signal) signum)
5096 {
5097 case TARGET_SIGNAL_TRAP:
5098 case TARGET_SIGNAL_INT:
5099 if (!allsigs && !sigs[signum])
5100 {
5101 if (query (_("%s is used by the debugger.\n\
5102 Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5103 {
5104 sigs[signum] = 1;
5105 }
5106 else
5107 {
5108 printf_unfiltered (_("Not confirmed, unchanged.\n"));
5109 gdb_flush (gdb_stdout);
5110 }
5111 }
5112 break;
5113 case TARGET_SIGNAL_0:
5114 case TARGET_SIGNAL_DEFAULT:
5115 case TARGET_SIGNAL_UNKNOWN:
5116 /* Make sure that "all" doesn't print these. */
5117 break;
5118 default:
5119 sigs[signum] = 1;
5120 break;
5121 }
5122 }
5123
5124 argv++;
5125 }
5126
5127 for (signum = 0; signum < nsigs; signum++)
5128 if (sigs[signum])
5129 {
5130 target_notice_signals (inferior_ptid);
5131
5132 if (from_tty)
5133 {
5134 /* Show the results. */
5135 sig_print_header ();
5136 for (; signum < nsigs; signum++)
5137 if (sigs[signum])
5138 sig_print_info (signum);
5139 }
5140
5141 break;
5142 }
5143
5144 do_cleanups (old_chain);
5145 }
5146
5147 static void
5148 xdb_handle_command (char *args, int from_tty)
5149 {
5150 char **argv;
5151 struct cleanup *old_chain;
5152
5153 if (args == NULL)
5154 error_no_arg (_("xdb command"));
5155
5156 /* Break the command line up into args. */
5157
5158 argv = gdb_buildargv (args);
5159 old_chain = make_cleanup_freeargv (argv);
5160 if (argv[1] != (char *) NULL)
5161 {
5162 char *argBuf;
5163 int bufLen;
5164
5165 bufLen = strlen (argv[0]) + 20;
5166 argBuf = (char *) xmalloc (bufLen);
5167 if (argBuf)
5168 {
5169 int validFlag = 1;
5170 enum target_signal oursig;
5171
5172 oursig = target_signal_from_name (argv[0]);
5173 memset (argBuf, 0, bufLen);
5174 if (strcmp (argv[1], "Q") == 0)
5175 sprintf (argBuf, "%s %s", argv[0], "noprint");
5176 else
5177 {
5178 if (strcmp (argv[1], "s") == 0)
5179 {
5180 if (!signal_stop[oursig])
5181 sprintf (argBuf, "%s %s", argv[0], "stop");
5182 else
5183 sprintf (argBuf, "%s %s", argv[0], "nostop");
5184 }
5185 else if (strcmp (argv[1], "i") == 0)
5186 {
5187 if (!signal_program[oursig])
5188 sprintf (argBuf, "%s %s", argv[0], "pass");
5189 else
5190 sprintf (argBuf, "%s %s", argv[0], "nopass");
5191 }
5192 else if (strcmp (argv[1], "r") == 0)
5193 {
5194 if (!signal_print[oursig])
5195 sprintf (argBuf, "%s %s", argv[0], "print");
5196 else
5197 sprintf (argBuf, "%s %s", argv[0], "noprint");
5198 }
5199 else
5200 validFlag = 0;
5201 }
5202 if (validFlag)
5203 handle_command (argBuf, from_tty);
5204 else
5205 printf_filtered (_("Invalid signal handling flag.\n"));
5206 if (argBuf)
5207 xfree (argBuf);
5208 }
5209 }
5210 do_cleanups (old_chain);
5211 }
5212
5213 /* Print current contents of the tables set by the handle command.
5214 It is possible we should just be printing signals actually used
5215 by the current target (but for things to work right when switching
5216 targets, all signals should be in the signal tables). */
5217
5218 static void
5219 signals_info (char *signum_exp, int from_tty)
5220 {
5221 enum target_signal oursig;
5222 sig_print_header ();
5223
5224 if (signum_exp)
5225 {
5226 /* First see if this is a symbol name. */
5227 oursig = target_signal_from_name (signum_exp);
5228 if (oursig == TARGET_SIGNAL_UNKNOWN)
5229 {
5230 /* No, try numeric. */
5231 oursig =
5232 target_signal_from_command (parse_and_eval_long (signum_exp));
5233 }
5234 sig_print_info (oursig);
5235 return;
5236 }
5237
5238 printf_filtered ("\n");
5239 /* These ugly casts brought to you by the native VAX compiler. */
5240 for (oursig = TARGET_SIGNAL_FIRST;
5241 (int) oursig < (int) TARGET_SIGNAL_LAST;
5242 oursig = (enum target_signal) ((int) oursig + 1))
5243 {
5244 QUIT;
5245
5246 if (oursig != TARGET_SIGNAL_UNKNOWN
5247 && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5248 sig_print_info (oursig);
5249 }
5250
5251 printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5252 }
5253
5254 /* The $_siginfo convenience variable is a bit special. We don't know
5255 for sure the type of the value until we actually have a chance to
5256 fetch the data. The type can change depending on gdbarch, so it it
5257 also dependent on which thread you have selected.
5258
5259 1. making $_siginfo be an internalvar that creates a new value on
5260 access.
5261
5262 2. making the value of $_siginfo be an lval_computed value. */
5263
5264 /* This function implements the lval_computed support for reading a
5265 $_siginfo value. */
5266
5267 static void
5268 siginfo_value_read (struct value *v)
5269 {
5270 LONGEST transferred;
5271
5272 transferred =
5273 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5274 NULL,
5275 value_contents_all_raw (v),
5276 value_offset (v),
5277 TYPE_LENGTH (value_type (v)));
5278
5279 if (transferred != TYPE_LENGTH (value_type (v)))
5280 error (_("Unable to read siginfo"));
5281 }
5282
5283 /* This function implements the lval_computed support for writing a
5284 $_siginfo value. */
5285
5286 static void
5287 siginfo_value_write (struct value *v, struct value *fromval)
5288 {
5289 LONGEST transferred;
5290
5291 transferred = target_write (&current_target,
5292 TARGET_OBJECT_SIGNAL_INFO,
5293 NULL,
5294 value_contents_all_raw (fromval),
5295 value_offset (v),
5296 TYPE_LENGTH (value_type (fromval)));
5297
5298 if (transferred != TYPE_LENGTH (value_type (fromval)))
5299 error (_("Unable to write siginfo"));
5300 }
5301
5302 static struct lval_funcs siginfo_value_funcs =
5303 {
5304 siginfo_value_read,
5305 siginfo_value_write
5306 };
5307
5308 /* Return a new value with the correct type for the siginfo object of
5309 the current thread using architecture GDBARCH. Return a void value
5310 if there's no object available. */
5311
5312 static struct value *
5313 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5314 {
5315 if (target_has_stack
5316 && !ptid_equal (inferior_ptid, null_ptid)
5317 && gdbarch_get_siginfo_type_p (gdbarch))
5318 {
5319 struct type *type = gdbarch_get_siginfo_type (gdbarch);
5320 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5321 }
5322
5323 return allocate_value (builtin_type (gdbarch)->builtin_void);
5324 }
5325
5326 \f
5327 /* Inferior thread state.
5328 These are details related to the inferior itself, and don't include
5329 things like what frame the user had selected or what gdb was doing
5330 with the target at the time.
5331 For inferior function calls these are things we want to restore
5332 regardless of whether the function call successfully completes
5333 or the dummy frame has to be manually popped. */
5334
5335 struct inferior_thread_state
5336 {
5337 enum target_signal stop_signal;
5338 CORE_ADDR stop_pc;
5339 struct regcache *registers;
5340 };
5341
5342 struct inferior_thread_state *
5343 save_inferior_thread_state (void)
5344 {
5345 struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
5346 struct thread_info *tp = inferior_thread ();
5347
5348 inf_state->stop_signal = tp->stop_signal;
5349 inf_state->stop_pc = stop_pc;
5350
5351 inf_state->registers = regcache_dup (get_current_regcache ());
5352
5353 return inf_state;
5354 }
5355
5356 /* Restore inferior session state to INF_STATE. */
5357
5358 void
5359 restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5360 {
5361 struct thread_info *tp = inferior_thread ();
5362
5363 tp->stop_signal = inf_state->stop_signal;
5364 stop_pc = inf_state->stop_pc;
5365
5366 /* The inferior can be gone if the user types "print exit(0)"
5367 (and perhaps other times). */
5368 if (target_has_execution)
5369 /* NB: The register write goes through to the target. */
5370 regcache_cpy (get_current_regcache (), inf_state->registers);
5371 regcache_xfree (inf_state->registers);
5372 xfree (inf_state);
5373 }
5374
5375 static void
5376 do_restore_inferior_thread_state_cleanup (void *state)
5377 {
5378 restore_inferior_thread_state (state);
5379 }
5380
5381 struct cleanup *
5382 make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
5383 {
5384 return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
5385 }
5386
5387 void
5388 discard_inferior_thread_state (struct inferior_thread_state *inf_state)
5389 {
5390 regcache_xfree (inf_state->registers);
5391 xfree (inf_state);
5392 }
5393
5394 struct regcache *
5395 get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
5396 {
5397 return inf_state->registers;
5398 }
5399
5400 /* Session related state for inferior function calls.
5401 These are the additional bits of state that need to be restored
5402 when an inferior function call successfully completes. */
5403
5404 struct inferior_status
5405 {
5406 bpstat stop_bpstat;
5407 int stop_step;
5408 int stop_stack_dummy;
5409 int stopped_by_random_signal;
5410 int stepping_over_breakpoint;
5411 CORE_ADDR step_range_start;
5412 CORE_ADDR step_range_end;
5413 struct frame_id step_frame_id;
5414 struct frame_id step_stack_frame_id;
5415 enum step_over_calls_kind step_over_calls;
5416 CORE_ADDR step_resume_break_address;
5417 int stop_after_trap;
5418 int stop_soon;
5419
5420 /* ID if the selected frame when the inferior function call was made. */
5421 struct frame_id selected_frame_id;
5422
5423 int proceed_to_finish;
5424 int in_infcall;
5425 };
5426
5427 /* Save all of the information associated with the inferior<==>gdb
5428 connection. */
5429
5430 struct inferior_status *
5431 save_inferior_status (void)
5432 {
5433 struct inferior_status *inf_status = XMALLOC (struct inferior_status);
5434 struct thread_info *tp = inferior_thread ();
5435 struct inferior *inf = current_inferior ();
5436
5437 inf_status->stop_step = tp->stop_step;
5438 inf_status->stop_stack_dummy = stop_stack_dummy;
5439 inf_status->stopped_by_random_signal = stopped_by_random_signal;
5440 inf_status->stepping_over_breakpoint = tp->trap_expected;
5441 inf_status->step_range_start = tp->step_range_start;
5442 inf_status->step_range_end = tp->step_range_end;
5443 inf_status->step_frame_id = tp->step_frame_id;
5444 inf_status->step_stack_frame_id = tp->step_stack_frame_id;
5445 inf_status->step_over_calls = tp->step_over_calls;
5446 inf_status->stop_after_trap = stop_after_trap;
5447 inf_status->stop_soon = inf->stop_soon;
5448 /* Save original bpstat chain here; replace it with copy of chain.
5449 If caller's caller is walking the chain, they'll be happier if we
5450 hand them back the original chain when restore_inferior_status is
5451 called. */
5452 inf_status->stop_bpstat = tp->stop_bpstat;
5453 tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
5454 inf_status->proceed_to_finish = tp->proceed_to_finish;
5455 inf_status->in_infcall = tp->in_infcall;
5456
5457 inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
5458
5459 return inf_status;
5460 }
5461
5462 static int
5463 restore_selected_frame (void *args)
5464 {
5465 struct frame_id *fid = (struct frame_id *) args;
5466 struct frame_info *frame;
5467
5468 frame = frame_find_by_id (*fid);
5469
5470 /* If inf_status->selected_frame_id is NULL, there was no previously
5471 selected frame. */
5472 if (frame == NULL)
5473 {
5474 warning (_("Unable to restore previously selected frame."));
5475 return 0;
5476 }
5477
5478 select_frame (frame);
5479
5480 return (1);
5481 }
5482
5483 /* Restore inferior session state to INF_STATUS. */
5484
5485 void
5486 restore_inferior_status (struct inferior_status *inf_status)
5487 {
5488 struct thread_info *tp = inferior_thread ();
5489 struct inferior *inf = current_inferior ();
5490
5491 tp->stop_step = inf_status->stop_step;
5492 stop_stack_dummy = inf_status->stop_stack_dummy;
5493 stopped_by_random_signal = inf_status->stopped_by_random_signal;
5494 tp->trap_expected = inf_status->stepping_over_breakpoint;
5495 tp->step_range_start = inf_status->step_range_start;
5496 tp->step_range_end = inf_status->step_range_end;
5497 tp->step_frame_id = inf_status->step_frame_id;
5498 tp->step_stack_frame_id = inf_status->step_stack_frame_id;
5499 tp->step_over_calls = inf_status->step_over_calls;
5500 stop_after_trap = inf_status->stop_after_trap;
5501 inf->stop_soon = inf_status->stop_soon;
5502 bpstat_clear (&tp->stop_bpstat);
5503 tp->stop_bpstat = inf_status->stop_bpstat;
5504 inf_status->stop_bpstat = NULL;
5505 tp->proceed_to_finish = inf_status->proceed_to_finish;
5506 tp->in_infcall = inf_status->in_infcall;
5507
5508 if (target_has_stack)
5509 {
5510 /* The point of catch_errors is that if the stack is clobbered,
5511 walking the stack might encounter a garbage pointer and
5512 error() trying to dereference it. */
5513 if (catch_errors
5514 (restore_selected_frame, &inf_status->selected_frame_id,
5515 "Unable to restore previously selected frame:\n",
5516 RETURN_MASK_ERROR) == 0)
5517 /* Error in restoring the selected frame. Select the innermost
5518 frame. */
5519 select_frame (get_current_frame ());
5520 }
5521
5522 xfree (inf_status);
5523 }
5524
5525 static void
5526 do_restore_inferior_status_cleanup (void *sts)
5527 {
5528 restore_inferior_status (sts);
5529 }
5530
5531 struct cleanup *
5532 make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
5533 {
5534 return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
5535 }
5536
5537 void
5538 discard_inferior_status (struct inferior_status *inf_status)
5539 {
5540 /* See save_inferior_status for info on stop_bpstat. */
5541 bpstat_clear (&inf_status->stop_bpstat);
5542 xfree (inf_status);
5543 }
5544 \f
5545 int
5546 inferior_has_forked (ptid_t pid, ptid_t *child_pid)
5547 {
5548 struct target_waitstatus last;
5549 ptid_t last_ptid;
5550
5551 get_last_target_status (&last_ptid, &last);
5552
5553 if (last.kind != TARGET_WAITKIND_FORKED)
5554 return 0;
5555
5556 if (!ptid_equal (last_ptid, pid))
5557 return 0;
5558
5559 *child_pid = last.value.related_pid;
5560 return 1;
5561 }
5562
5563 int
5564 inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
5565 {
5566 struct target_waitstatus last;
5567 ptid_t last_ptid;
5568
5569 get_last_target_status (&last_ptid, &last);
5570
5571 if (last.kind != TARGET_WAITKIND_VFORKED)
5572 return 0;
5573
5574 if (!ptid_equal (last_ptid, pid))
5575 return 0;
5576
5577 *child_pid = last.value.related_pid;
5578 return 1;
5579 }
5580
5581 int
5582 inferior_has_execd (ptid_t pid, char **execd_pathname)
5583 {
5584 struct target_waitstatus last;
5585 ptid_t last_ptid;
5586
5587 get_last_target_status (&last_ptid, &last);
5588
5589 if (last.kind != TARGET_WAITKIND_EXECD)
5590 return 0;
5591
5592 if (!ptid_equal (last_ptid, pid))
5593 return 0;
5594
5595 *execd_pathname = xstrdup (last.value.execd_pathname);
5596 return 1;
5597 }
5598
5599 /* Oft used ptids */
5600 ptid_t null_ptid;
5601 ptid_t minus_one_ptid;
5602
5603 /* Create a ptid given the necessary PID, LWP, and TID components. */
5604
5605 ptid_t
5606 ptid_build (int pid, long lwp, long tid)
5607 {
5608 ptid_t ptid;
5609
5610 ptid.pid = pid;
5611 ptid.lwp = lwp;
5612 ptid.tid = tid;
5613 return ptid;
5614 }
5615
5616 /* Create a ptid from just a pid. */
5617
5618 ptid_t
5619 pid_to_ptid (int pid)
5620 {
5621 return ptid_build (pid, 0, 0);
5622 }
5623
5624 /* Fetch the pid (process id) component from a ptid. */
5625
5626 int
5627 ptid_get_pid (ptid_t ptid)
5628 {
5629 return ptid.pid;
5630 }
5631
5632 /* Fetch the lwp (lightweight process) component from a ptid. */
5633
5634 long
5635 ptid_get_lwp (ptid_t ptid)
5636 {
5637 return ptid.lwp;
5638 }
5639
5640 /* Fetch the tid (thread id) component from a ptid. */
5641
5642 long
5643 ptid_get_tid (ptid_t ptid)
5644 {
5645 return ptid.tid;
5646 }
5647
5648 /* ptid_equal() is used to test equality of two ptids. */
5649
5650 int
5651 ptid_equal (ptid_t ptid1, ptid_t ptid2)
5652 {
5653 return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
5654 && ptid1.tid == ptid2.tid);
5655 }
5656
5657 /* Returns true if PTID represents a process. */
5658
5659 int
5660 ptid_is_pid (ptid_t ptid)
5661 {
5662 if (ptid_equal (minus_one_ptid, ptid))
5663 return 0;
5664 if (ptid_equal (null_ptid, ptid))
5665 return 0;
5666
5667 return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
5668 }
5669
5670 /* restore_inferior_ptid() will be used by the cleanup machinery
5671 to restore the inferior_ptid value saved in a call to
5672 save_inferior_ptid(). */
5673
5674 static void
5675 restore_inferior_ptid (void *arg)
5676 {
5677 ptid_t *saved_ptid_ptr = arg;
5678 inferior_ptid = *saved_ptid_ptr;
5679 xfree (arg);
5680 }
5681
5682 /* Save the value of inferior_ptid so that it may be restored by a
5683 later call to do_cleanups(). Returns the struct cleanup pointer
5684 needed for later doing the cleanup. */
5685
5686 struct cleanup *
5687 save_inferior_ptid (void)
5688 {
5689 ptid_t *saved_ptid_ptr;
5690
5691 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
5692 *saved_ptid_ptr = inferior_ptid;
5693 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
5694 }
5695 \f
5696
5697 /* User interface for reverse debugging:
5698 Set exec-direction / show exec-direction commands
5699 (returns error unless target implements to_set_exec_direction method). */
5700
5701 enum exec_direction_kind execution_direction = EXEC_FORWARD;
5702 static const char exec_forward[] = "forward";
5703 static const char exec_reverse[] = "reverse";
5704 static const char *exec_direction = exec_forward;
5705 static const char *exec_direction_names[] = {
5706 exec_forward,
5707 exec_reverse,
5708 NULL
5709 };
5710
5711 static void
5712 set_exec_direction_func (char *args, int from_tty,
5713 struct cmd_list_element *cmd)
5714 {
5715 if (target_can_execute_reverse)
5716 {
5717 if (!strcmp (exec_direction, exec_forward))
5718 execution_direction = EXEC_FORWARD;
5719 else if (!strcmp (exec_direction, exec_reverse))
5720 execution_direction = EXEC_REVERSE;
5721 }
5722 }
5723
5724 static void
5725 show_exec_direction_func (struct ui_file *out, int from_tty,
5726 struct cmd_list_element *cmd, const char *value)
5727 {
5728 switch (execution_direction) {
5729 case EXEC_FORWARD:
5730 fprintf_filtered (out, _("Forward.\n"));
5731 break;
5732 case EXEC_REVERSE:
5733 fprintf_filtered (out, _("Reverse.\n"));
5734 break;
5735 case EXEC_ERROR:
5736 default:
5737 fprintf_filtered (out,
5738 _("Forward (target `%s' does not support exec-direction).\n"),
5739 target_shortname);
5740 break;
5741 }
5742 }
5743
5744 /* User interface for non-stop mode. */
5745
5746 int non_stop = 0;
5747 static int non_stop_1 = 0;
5748
5749 static void
5750 set_non_stop (char *args, int from_tty,
5751 struct cmd_list_element *c)
5752 {
5753 if (target_has_execution)
5754 {
5755 non_stop_1 = non_stop;
5756 error (_("Cannot change this setting while the inferior is running."));
5757 }
5758
5759 non_stop = non_stop_1;
5760 }
5761
5762 static void
5763 show_non_stop (struct ui_file *file, int from_tty,
5764 struct cmd_list_element *c, const char *value)
5765 {
5766 fprintf_filtered (file,
5767 _("Controlling the inferior in non-stop mode is %s.\n"),
5768 value);
5769 }
5770
5771 static void
5772 show_schedule_multiple (struct ui_file *file, int from_tty,
5773 struct cmd_list_element *c, const char *value)
5774 {
5775 fprintf_filtered (file, _("\
5776 Resuming the execution of threads of all processes is %s.\n"), value);
5777 }
5778
5779 void
5780 _initialize_infrun (void)
5781 {
5782 int i;
5783 int numsigs;
5784 struct cmd_list_element *c;
5785
5786 add_info ("signals", signals_info, _("\
5787 What debugger does when program gets various signals.\n\
5788 Specify a signal as argument to print info on that signal only."));
5789 add_info_alias ("handle", "signals", 0);
5790
5791 add_com ("handle", class_run, handle_command, _("\
5792 Specify how to handle a signal.\n\
5793 Args are signals and actions to apply to those signals.\n\
5794 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
5795 from 1-15 are allowed for compatibility with old versions of GDB.\n\
5796 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
5797 The special arg \"all\" is recognized to mean all signals except those\n\
5798 used by the debugger, typically SIGTRAP and SIGINT.\n\
5799 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
5800 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
5801 Stop means reenter debugger if this signal happens (implies print).\n\
5802 Print means print a message if this signal happens.\n\
5803 Pass means let program see this signal; otherwise program doesn't know.\n\
5804 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
5805 Pass and Stop may be combined."));
5806 if (xdb_commands)
5807 {
5808 add_com ("lz", class_info, signals_info, _("\
5809 What debugger does when program gets various signals.\n\
5810 Specify a signal as argument to print info on that signal only."));
5811 add_com ("z", class_run, xdb_handle_command, _("\
5812 Specify how to handle a signal.\n\
5813 Args are signals and actions to apply to those signals.\n\
5814 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
5815 from 1-15 are allowed for compatibility with old versions of GDB.\n\
5816 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
5817 The special arg \"all\" is recognized to mean all signals except those\n\
5818 used by the debugger, typically SIGTRAP and SIGINT.\n\
5819 Recognized actions include \"s\" (toggles between stop and nostop), \n\
5820 \"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
5821 nopass), \"Q\" (noprint)\n\
5822 Stop means reenter debugger if this signal happens (implies print).\n\
5823 Print means print a message if this signal happens.\n\
5824 Pass means let program see this signal; otherwise program doesn't know.\n\
5825 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
5826 Pass and Stop may be combined."));
5827 }
5828
5829 if (!dbx_commands)
5830 stop_command = add_cmd ("stop", class_obscure,
5831 not_just_help_class_command, _("\
5832 There is no `stop' command, but you can set a hook on `stop'.\n\
5833 This allows you to set a list of commands to be run each time execution\n\
5834 of the program stops."), &cmdlist);
5835
5836 add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
5837 Set inferior debugging."), _("\
5838 Show inferior debugging."), _("\
5839 When non-zero, inferior specific debugging is enabled."),
5840 NULL,
5841 show_debug_infrun,
5842 &setdebuglist, &showdebuglist);
5843
5844 add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
5845 Set displaced stepping debugging."), _("\
5846 Show displaced stepping debugging."), _("\
5847 When non-zero, displaced stepping specific debugging is enabled."),
5848 NULL,
5849 show_debug_displaced,
5850 &setdebuglist, &showdebuglist);
5851
5852 add_setshow_boolean_cmd ("non-stop", no_class,
5853 &non_stop_1, _("\
5854 Set whether gdb controls the inferior in non-stop mode."), _("\
5855 Show whether gdb controls the inferior in non-stop mode."), _("\
5856 When debugging a multi-threaded program and this setting is\n\
5857 off (the default, also called all-stop mode), when one thread stops\n\
5858 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
5859 all other threads in the program while you interact with the thread of\n\
5860 interest. When you continue or step a thread, you can allow the other\n\
5861 threads to run, or have them remain stopped, but while you inspect any\n\
5862 thread's state, all threads stop.\n\
5863 \n\
5864 In non-stop mode, when one thread stops, other threads can continue\n\
5865 to run freely. You'll be able to step each thread independently,\n\
5866 leave it stopped or free to run as needed."),
5867 set_non_stop,
5868 show_non_stop,
5869 &setlist,
5870 &showlist);
5871
5872 numsigs = (int) TARGET_SIGNAL_LAST;
5873 signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
5874 signal_print = (unsigned char *)
5875 xmalloc (sizeof (signal_print[0]) * numsigs);
5876 signal_program = (unsigned char *)
5877 xmalloc (sizeof (signal_program[0]) * numsigs);
5878 for (i = 0; i < numsigs; i++)
5879 {
5880 signal_stop[i] = 1;
5881 signal_print[i] = 1;
5882 signal_program[i] = 1;
5883 }
5884
5885 /* Signals caused by debugger's own actions
5886 should not be given to the program afterwards. */
5887 signal_program[TARGET_SIGNAL_TRAP] = 0;
5888 signal_program[TARGET_SIGNAL_INT] = 0;
5889
5890 /* Signals that are not errors should not normally enter the debugger. */
5891 signal_stop[TARGET_SIGNAL_ALRM] = 0;
5892 signal_print[TARGET_SIGNAL_ALRM] = 0;
5893 signal_stop[TARGET_SIGNAL_VTALRM] = 0;
5894 signal_print[TARGET_SIGNAL_VTALRM] = 0;
5895 signal_stop[TARGET_SIGNAL_PROF] = 0;
5896 signal_print[TARGET_SIGNAL_PROF] = 0;
5897 signal_stop[TARGET_SIGNAL_CHLD] = 0;
5898 signal_print[TARGET_SIGNAL_CHLD] = 0;
5899 signal_stop[TARGET_SIGNAL_IO] = 0;
5900 signal_print[TARGET_SIGNAL_IO] = 0;
5901 signal_stop[TARGET_SIGNAL_POLL] = 0;
5902 signal_print[TARGET_SIGNAL_POLL] = 0;
5903 signal_stop[TARGET_SIGNAL_URG] = 0;
5904 signal_print[TARGET_SIGNAL_URG] = 0;
5905 signal_stop[TARGET_SIGNAL_WINCH] = 0;
5906 signal_print[TARGET_SIGNAL_WINCH] = 0;
5907
5908 /* These signals are used internally by user-level thread
5909 implementations. (See signal(5) on Solaris.) Like the above
5910 signals, a healthy program receives and handles them as part of
5911 its normal operation. */
5912 signal_stop[TARGET_SIGNAL_LWP] = 0;
5913 signal_print[TARGET_SIGNAL_LWP] = 0;
5914 signal_stop[TARGET_SIGNAL_WAITING] = 0;
5915 signal_print[TARGET_SIGNAL_WAITING] = 0;
5916 signal_stop[TARGET_SIGNAL_CANCEL] = 0;
5917 signal_print[TARGET_SIGNAL_CANCEL] = 0;
5918
5919 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
5920 &stop_on_solib_events, _("\
5921 Set stopping for shared library events."), _("\
5922 Show stopping for shared library events."), _("\
5923 If nonzero, gdb will give control to the user when the dynamic linker\n\
5924 notifies gdb of shared library events. The most common event of interest\n\
5925 to the user would be loading/unloading of a new library."),
5926 NULL,
5927 show_stop_on_solib_events,
5928 &setlist, &showlist);
5929
5930 add_setshow_enum_cmd ("follow-fork-mode", class_run,
5931 follow_fork_mode_kind_names,
5932 &follow_fork_mode_string, _("\
5933 Set debugger response to a program call of fork or vfork."), _("\
5934 Show debugger response to a program call of fork or vfork."), _("\
5935 A fork or vfork creates a new process. follow-fork-mode can be:\n\
5936 parent - the original process is debugged after a fork\n\
5937 child - the new process is debugged after a fork\n\
5938 The unfollowed process will continue to run.\n\
5939 By default, the debugger will follow the parent process."),
5940 NULL,
5941 show_follow_fork_mode_string,
5942 &setlist, &showlist);
5943
5944 add_setshow_enum_cmd ("scheduler-locking", class_run,
5945 scheduler_enums, &scheduler_mode, _("\
5946 Set mode for locking scheduler during execution."), _("\
5947 Show mode for locking scheduler during execution."), _("\
5948 off == no locking (threads may preempt at any time)\n\
5949 on == full locking (no thread except the current thread may run)\n\
5950 step == scheduler locked during every single-step operation.\n\
5951 In this mode, no other thread may run during a step command.\n\
5952 Other threads may run while stepping over a function call ('next')."),
5953 set_schedlock_func, /* traps on target vector */
5954 show_scheduler_mode,
5955 &setlist, &showlist);
5956
5957 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
5958 Set mode for resuming threads of all processes."), _("\
5959 Show mode for resuming threads of all processes."), _("\
5960 When on, execution commands (such as 'continue' or 'next') resume all\n\
5961 threads of all processes. When off (which is the default), execution\n\
5962 commands only resume the threads of the current process. The set of\n\
5963 threads that are resumed is further refined by the scheduler-locking\n\
5964 mode (see help set scheduler-locking)."),
5965 NULL,
5966 show_schedule_multiple,
5967 &setlist, &showlist);
5968
5969 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
5970 Set mode of the step operation."), _("\
5971 Show mode of the step operation."), _("\
5972 When set, doing a step over a function without debug line information\n\
5973 will stop at the first instruction of that function. Otherwise, the\n\
5974 function is skipped and the step command stops at a different source line."),
5975 NULL,
5976 show_step_stop_if_no_debug,
5977 &setlist, &showlist);
5978
5979 add_setshow_enum_cmd ("displaced-stepping", class_run,
5980 can_use_displaced_stepping_enum,
5981 &can_use_displaced_stepping, _("\
5982 Set debugger's willingness to use displaced stepping."), _("\
5983 Show debugger's willingness to use displaced stepping."), _("\
5984 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
5985 supported by the target architecture. If off, gdb will not use displaced\n\
5986 stepping to step over breakpoints, even if such is supported by the target\n\
5987 architecture. If auto (which is the default), gdb will use displaced stepping\n\
5988 if the target architecture supports it and non-stop mode is active, but will not\n\
5989 use it in all-stop mode (see help set non-stop)."),
5990 NULL,
5991 show_can_use_displaced_stepping,
5992 &setlist, &showlist);
5993
5994 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
5995 &exec_direction, _("Set direction of execution.\n\
5996 Options are 'forward' or 'reverse'."),
5997 _("Show direction of execution (forward/reverse)."),
5998 _("Tells gdb whether to execute forward or backward."),
5999 set_exec_direction_func, show_exec_direction_func,
6000 &setlist, &showlist);
6001
6002 /* ptid initializations */
6003 null_ptid = ptid_build (0, 0, 0);
6004 minus_one_ptid = ptid_build (-1, 0, 0);
6005 inferior_ptid = null_ptid;
6006 target_last_wait_ptid = minus_one_ptid;
6007 displaced_step_ptid = null_ptid;
6008
6009 observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
6010 observer_attach_thread_stop_requested (infrun_thread_stop_requested);
6011 observer_attach_thread_exit (infrun_thread_thread_exit);
6012
6013 /* Explicitly create without lookup, since that tries to create a
6014 value with a void typed value, and when we get here, gdbarch
6015 isn't initialized yet. At this point, we're quite sure there
6016 isn't another convenience variable of the same name. */
6017 create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
6018 }
This page took 0.158485 seconds and 4 git commands to generate.