Use exec_file_find to prepend gdb_sysroot in follow_exec
[deliverable/binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2015 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "infrun.h"
23 #include <ctype.h>
24 #include "symtab.h"
25 #include "frame.h"
26 #include "inferior.h"
27 #include "breakpoint.h"
28 #include "gdb_wait.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "cli/cli-script.h"
32 #include "target.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include <signal.h>
38 #include "inf-loop.h"
39 #include "regcache.h"
40 #include "value.h"
41 #include "observer.h"
42 #include "language.h"
43 #include "solib.h"
44 #include "main.h"
45 #include "dictionary.h"
46 #include "block.h"
47 #include "mi/mi-common.h"
48 #include "event-top.h"
49 #include "record.h"
50 #include "record-full.h"
51 #include "inline-frame.h"
52 #include "jit.h"
53 #include "tracepoint.h"
54 #include "continuations.h"
55 #include "interps.h"
56 #include "skip.h"
57 #include "probe.h"
58 #include "objfiles.h"
59 #include "completer.h"
60 #include "target-descriptions.h"
61 #include "target-dcache.h"
62 #include "terminal.h"
63 #include "solist.h"
64
65 /* Prototypes for local functions */
66
67 static void signals_info (char *, int);
68
69 static void handle_command (char *, int);
70
71 static void sig_print_info (enum gdb_signal);
72
73 static void sig_print_header (void);
74
75 static void resume_cleanups (void *);
76
77 static int hook_stop_stub (void *);
78
79 static int restore_selected_frame (void *);
80
81 static int follow_fork (void);
82
83 static int follow_fork_inferior (int follow_child, int detach_fork);
84
85 static void follow_inferior_reset_breakpoints (void);
86
87 static void set_schedlock_func (char *args, int from_tty,
88 struct cmd_list_element *c);
89
90 static int currently_stepping (struct thread_info *tp);
91
92 void _initialize_infrun (void);
93
94 void nullify_last_target_wait_ptid (void);
95
96 static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);
97
98 static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
99
100 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
101
102 static int maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc);
103
104 /* When set, stop the 'step' command if we enter a function which has
105 no line number information. The normal behavior is that we step
106 over such function. */
107 int step_stop_if_no_debug = 0;
/* "show" callback for the step_stop_if_no_debug setting; prints the
   current mode (VALUE is the already-formatted setting string).  */
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}
114
115 /* In asynchronous mode, but simulating synchronous execution. */
116
117 int sync_execution = 0;
118
119 /* proceed and normal_stop use this to notify the user when the
120 inferior stopped in a different thread than it had been running
121 in. */
122
123 static ptid_t previous_inferior_ptid;
124
125 /* If set (default for legacy reasons), when following a fork, GDB
126 will detach from one of the fork branches, child or parent.
127 Exactly which branch is detached depends on 'set follow-fork-mode'
128 setting. */
129
130 static int detach_fork = 1;
131
132 int debug_displaced = 0;
/* "show" callback for the debug_displaced flag; prints the current
   state of displaced-stepping debugging output.  */
static void
show_debug_displaced (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Displace stepping debugging is %s.\n"), value);
}
139
140 unsigned int debug_infrun = 0;
/* "show" callback for the debug_infrun flag; prints whether infrun
   debugging output is enabled.  */
static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}
147
148
149 /* Support for disabling address space randomization. */
150
151 int disable_randomization = 1;
152
/* "show" callback for the disable_randomization setting.  When the
   target cannot disable address space randomization at all, say so
   instead of printing the (meaningless) current value.  */

static void
show_disable_randomization (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  if (!target_supports_disable_randomization ())
    {
      fputs_filtered (_("Disabling randomization of debuggee's "
			"virtual address space is unsupported on\n"
			"this platform.\n"), file);
      return;
    }

  fprintf_filtered (file,
		    _("Disabling randomization of debuggee's "
		      "virtual address space is %s.\n"),
		    value);
}
167
/* "set" callback for the disable_randomization setting.  The generic
   set machinery has already stored the new value; all we do here is
   reject the command on targets that cannot honor it.  */

static void
set_disable_randomization (char *args, int from_tty,
			   struct cmd_list_element *c)
{
  if (target_supports_disable_randomization ())
    return;

  error (_("Disabling randomization of debuggee's "
	   "virtual address space is unsupported on\n"
	   "this platform."));
}
177
178 /* User interface for non-stop mode. */
179
180 int non_stop = 0;
181 static int non_stop_1 = 0;
182
/* "set non-stop" callback.  The new value has been staged in
   NON_STOP_1 (presumably by the generic set-command machinery —
   NON_STOP_1 is the variable registered with the command).  Commit it
   to NON_STOP, unless the inferior is running, in which case revert
   the staged value and error out.  */
static void
set_non_stop (char *args, int from_tty,
	      struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Roll the staged value back so "show non-stop" keeps
	 reporting the old setting; error () throws, so the
	 assignment below is not reached.  */
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}
195
/* "show non-stop" callback; prints the current non-stop setting.  */
static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Controlling the inferior in non-stop mode is %s.\n"),
		    value);
}
204
205 /* "Observer mode" is somewhat like a more extreme version of
206 non-stop, in which all GDB operations that might affect the
207 target's execution have been disabled. */
208
209 int observer_mode = 0;
210 static int observer_mode_1 = 0;
211
/* "set observer" callback.  The new value has been staged in
   OBSERVER_MODE_1; commit it to OBSERVER_MODE and recompute the
   "may-*" permission flags accordingly, or error out if the inferior
   is live.  */
static void
set_observer_mode (char *args, int from_tty,
		   struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Revert the staged value so "show" still reports the old
	 setting; error () throws past the rest of this function.  */
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  /* Observing means not perturbing the target: turn off every
     permission that could alter its execution.  */
  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = 1;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = 1;
    }

  if (from_tty)
    printf_filtered (_("Observer mode is now %s.\n"),
		     (observer_mode ? "on" : "off"));
}
247
/* "show observer" callback; prints the current observer-mode
   setting.  */
static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}
254
255 /* This updates the value of observer mode based on changes in
256 permissions. Note that we are deliberately ignoring the values of
257 may-write-registers and may-write-memory, since the user may have
258 reason to enable these during a session, for instance to turn on a
259 debugging-related global. */
260
261 void
262 update_observer_mode (void)
263 {
264 int newval;
265
266 newval = (!may_insert_breakpoints
267 && !may_insert_tracepoints
268 && may_insert_fast_tracepoints
269 && !may_stop
270 && non_stop);
271
272 /* Let the user know if things change. */
273 if (newval != observer_mode)
274 printf_filtered (_("Observer mode is now %s.\n"),
275 (newval ? "on" : "off"));
276
277 observer_mode = observer_mode_1 = newval;
278 }
279
280 /* Tables of how to react to signals; the user sets them. */
281
282 static unsigned char *signal_stop;
283 static unsigned char *signal_print;
284 static unsigned char *signal_program;
285
286 /* Table of signals that are registered with "catch signal". A
287 non-zero entry indicates that the signal is caught by some "catch
288 signal" command. This has size GDB_SIGNAL_LAST, to accommodate all
289 signals. */
290 static unsigned char *signal_catch;
291
292 /* Table of signals that the target may silently handle.
293 This is automatically determined from the flags above,
294 and simply cached here. */
295 static unsigned char *signal_pass;
296
/* For each of the first NSIGS signal numbers, set (SET_SIGS) or
   clear (UNSET_SIGS) FLAGS[signum] for every signal whose entry in
   SIGS is nonzero.  Used by the "handle" command machinery to update
   the signal_stop/signal_print/signal_program tables.  Multi-statement
   macros wrapped in do/while (0) so they behave as single
   statements.  */
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)
312
313 /* Update the target's copy of SIGNAL_PROGRAM. The sole purpose of
314 this function is to avoid exporting `signal_program'. */
315
316 void
317 update_signals_program_target (void)
318 {
319 target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);
320 }
321
322 /* Value to pass to target_resume() to cause all threads to resume. */
323
324 #define RESUME_ALL minus_one_ptid
325
326 /* Command list pointer for the "stop" placeholder. */
327
328 static struct cmd_list_element *stop_command;
329
330 /* Nonzero if we want to give control to the user when we're notified
331 of shared library events by the dynamic linker. */
332 int stop_on_solib_events;
333
334 /* Enable or disable optional shared library event breakpoints
335 as appropriate when the above flag is changed. */
336
/* "set stop-on-solib-events" callback: the flag itself has already
   been updated; re-sync the optional shared library event
   breakpoints to match it.  */
static void
set_stop_on_solib_events (char *args, int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}
342
/* "show stop-on-solib-events" callback; prints the current
   setting.  */
static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
		    value);
}
350
351 /* Nonzero means expecting a trace trap
352 and should stop the inferior and return silently when it happens. */
353
354 int stop_after_trap;
355
356 /* Save register contents here when executing a "finish" command or are
357 about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
358 Thus this contains the return value from the called function (assuming
359 values are returned in a register). */
360
361 struct regcache *stop_registers;
362
363 /* Nonzero after stop if current stack frame should be printed. */
364
365 static int stop_print_frame;
366
367 /* This is a cached copy of the pid/waitstatus of the last event
368 returned by target_wait()/deprecated_target_wait_hook(). This
369 information is returned by get_last_target_status(). */
370 static ptid_t target_last_wait_ptid;
371 static struct target_waitstatus target_last_waitstatus;
372
373 static void context_switch (ptid_t ptid);
374
375 void init_thread_stepping_state (struct thread_info *tss);
376
377 static const char follow_fork_mode_child[] = "child";
378 static const char follow_fork_mode_parent[] = "parent";
379
380 static const char *const follow_fork_mode_kind_names[] = {
381 follow_fork_mode_child,
382 follow_fork_mode_parent,
383 NULL
384 };
385
386 static const char *follow_fork_mode_string = follow_fork_mode_parent;
/* "show follow-fork-mode" callback; VALUE is "parent" or "child"
   (see follow_fork_mode_kind_names above).  */
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Debugger response to a program "
		      "call of fork or vfork is \"%s\".\n"),
		    value);
}
396 \f
397
/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.

   Returns nonzero when the caller should NOT resume: either we
   refused to resume the parent over a foreground vfork (see below),
   or target_follow_fork asked not to.  The caller (follow_fork)
   treats a nonzero result as "don't resume".  */

static int
follow_fork_inferior (int follow_child, int detach_fork)
{
  int has_vforked;
  ptid_t parent_ptid, child_ptid;

  has_vforked = (inferior_thread ()->pending_follow.kind
		 == TARGET_WAITKIND_VFORKED);
  parent_ptid = inferior_ptid;
  child_ptid = inferior_thread ()->pending_follow.value.related_pid;

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      return 1;
    }

  if (!follow_child)
    {
      /* We're staying with the parent.  Detach new forked process?  */
      if (detach_fork)
	{
	  /* NOTE(review): old_chain is declared but never used in
	     this branch -- looks like dead code; confirm and
	     remove.  */
	  struct cleanup *old_chain;

	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
	    }

	  if (info_verbose || debug_infrun)
	    {
	      target_terminal_ours_for_output ();
	      fprintf_filtered (gdb_stdlog,
				_("Detaching after %s from child %s.\n"),
				has_vforked ? "vfork" : "fork",
				target_pid_to_str (child_ptid));
	    }
	}
      else
	{
	  /* Keep debugging the child too: register it with GDB.  */
	  struct inferior *parent_inf, *child_inf;
	  struct cleanup *old_chain;

	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (ptid_get_pid (child_ptid));

	  parent_inf = current_inferior ();
	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);
	  child_inf->gdbarch = parent_inf->gdbarch;
	  copy_inferior_target_desc_info (child_inf, parent_inf);

	  /* Temporarily switch to the child to set it up; restored
	     by do_cleanups below.  */
	  old_chain = save_inferior_ptid ();
	  save_current_program_space ();

	  inferior_ptid = child_ptid;
	  add_thread (inferior_ptid);
	  child_inf->symfile_flags = SYMFILE_NO_READ;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = 0;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = 0;
	    }
	  else
	    {
	      /* Plain fork: give the child its own copy of the
		 parent's program/address spaces.  */
	      child_inf->aspace = new_address_space ();
	      child_inf->pspace = add_program_space (child_inf->aspace);
	      child_inf->removable = 1;
	      set_current_program_space (child_inf->pspace);
	      clone_program_space (child_inf->pspace, parent_inf->pspace);

	      /* Let the shared library layer (e.g., solib-svr4) learn
		 about this new process, relocate the cloned exec, pull
		 in shared libraries, and install the solib event
		 breakpoint.  If a "cloned-VM" event was propagated
		 better throughout the core, this wouldn't be
		 required.  */
	      solib_create_inferior_hook (0);
	    }

	  do_cleanups (old_chain);
	}

      if (has_vforked)
	{
	  struct inferior *parent_inf;

	  parent_inf = current_inferior ();

	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when does
	     the child stops using the parent's address space.  */
	  parent_inf->waiting_for_vfork_done = detach_fork;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;
	}
    }
  else
    {
      /* Follow the child.  */
      struct inferior *parent_inf, *child_inf;
      struct program_space *parent_pspace;

      if (info_verbose || debug_infrun)
	{
	  target_terminal_ours_for_output ();
	  fprintf_filtered (gdb_stdlog,
			    _("Attaching after %s %s to child %s.\n"),
			    target_pid_to_str (parent_ptid),
			    has_vforked ? "vfork" : "fork",
			    target_pid_to_str (child_ptid));
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (ptid_get_pid (child_ptid));

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
	 child exits or execs.  At child exec or exit time we can
	 remove the old breakpoints from the parent and detach or
	 resume debugging it.  Otherwise, detach the parent now; we'll
	 want to reuse it's program/address spaces, but we can't set
	 them to the child before removing breakpoints from the
	 parent, otherwise, the breakpoints module could decide to
	 remove breakpoints from the wrong process (since they'd be
	 assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == NULL);
	  gdb_assert (parent_inf->vfork_child == NULL);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = 0;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	  parent_inf->waiting_for_vfork_done = 0;
	}
      else if (detach_fork)
	{
	  if (info_verbose || debug_infrun)
	    {
	      target_terminal_ours_for_output ();
	      fprintf_filtered (gdb_stdlog,
				_("Detaching after fork from "
				  "child %s.\n"),
				target_pid_to_str (child_ptid));
	    }

	  target_detach (NULL, 0);
	}

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
	 this new thread, before cloning the program space, and
	 informing the solib layer about this new process.  */

      inferior_ptid = child_ptid;
      add_thread (inferior_ptid);

      /* If this is a vfork child, then the address-space is shared
	 with the parent.  If we detached from the parent, then we can
	 reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
	{
	  child_inf->pspace = parent_pspace;
	  child_inf->aspace = child_inf->pspace->aspace;
	}
      else
	{
	  child_inf->aspace = new_address_space ();
	  child_inf->pspace = add_program_space (child_inf->aspace);
	  child_inf->removable = 1;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  set_current_program_space (child_inf->pspace);
	  clone_program_space (child_inf->pspace, parent_pspace);

	  /* Let the shared library layer (e.g., solib-svr4) learn
	     about this new process, relocate the cloned exec, pull in
	     shared libraries, and install the solib event breakpoint.
	     If a "cloned-VM" event was propagated better throughout
	     the core, this wouldn't be required.  */
	  solib_create_inferior_hook (0);
	}
    }

  /* Give the target a chance to follow (and possibly veto the
     resume) at its level too.  */
  return target_follow_fork (follow_child, detach_fork);
}
635
/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static int
follow_fork (void)
{
  int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  int should_resume = 1;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  struct frame_id step_frame_id = { 0 };
  struct interp *command_interp = NULL;

  if (!non_stop)
    {
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
	 do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
	  && wait_status.kind != TARGET_WAITKIND_VFORKED)
	return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
	 reported.  */
      if (!ptid_equal (wait_ptid, minus_one_ptid)
	  && !ptid_equal (inferior_ptid, wait_ptid))
	{
	  /* We did.  Switch back to WAIT_PTID thread, to tell the
	     target to follow it (in either direction).  We'll
	     afterwards refuse to resume, and inform the user what
	     happened.  */
	  switch_to_thread (wait_ptid);
	  should_resume = 0;
	}
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
	ptid_t parent, child;

	/* If the user did a next/step, etc, over a fork call,
	   preserve the stepping state in the fork child.  The
	   parent's state is cleared further below; the saved copies
	   are re-applied to the child after the switch.  */
	if (follow_child && should_resume)
	  {
	    step_resume_breakpoint = clone_momentary_breakpoint
	      (tp->control.step_resume_breakpoint);
	    step_range_start = tp->control.step_range_start;
	    step_range_end = tp->control.step_range_end;
	    step_frame_id = tp->control.step_frame_id;
	    exception_resume_breakpoint
	      = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
	    command_interp = tp->control.command_interp;

	    /* For now, delete the parent's sr breakpoint, otherwise,
	       parent/child sr breakpoints are considered duplicates,
	       and the child version will not be installed.  Remove
	       this when the breakpoints module becomes aware of
	       inferiors and address spaces.  */
	    delete_step_resume_breakpoint (tp);
	    tp->control.step_range_start = 0;
	    tp->control.step_range_end = 0;
	    tp->control.step_frame_id = null_frame_id;
	    delete_exception_resume_breakpoint (tp);
	    tp->control.command_interp = NULL;
	  }

	parent = inferior_ptid;
	child = tp->pending_follow.value.related_pid;

	/* Set up inferior(s) as specified by the caller, and tell the
	   target to do whatever is necessary to follow either parent
	   or child.  */
	if (follow_fork_inferior (follow_child, detach_fork))
	  {
	    /* Target refused to follow, or there's some other reason
	       we shouldn't resume.  */
	    should_resume = 0;
	  }
	else
	  {
	    /* This pending follow fork event is now handled, one way
	       or another.  The previous selected thread may be gone
	       from the lists by now, but if it is still around, need
	       to clear the pending follow request.  */
	    tp = find_thread_ptid (parent);
	    if (tp)
	      tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

	    /* This makes sure we don't try to apply the "Switched
	       over from WAIT_PID" logic above.  */
	    nullify_last_target_wait_ptid ();

	    /* If we followed the child, switch to it...  */
	    if (follow_child)
	      {
		switch_to_thread (child);

		/* ... and preserve the stepping state, in case the
		   user was stepping over the fork call.  */
		if (should_resume)
		  {
		    tp = inferior_thread ();
		    tp->control.step_resume_breakpoint
		      = step_resume_breakpoint;
		    tp->control.step_range_start = step_range_start;
		    tp->control.step_range_end = step_range_end;
		    tp->control.step_frame_id = step_frame_id;
		    tp->control.exception_resume_breakpoint
		      = exception_resume_breakpoint;
		    tp->control.command_interp = command_interp;
		  }
		else
		  {
		    /* If we get here, it was because we're trying to
		       resume from a fork catchpoint, but, the user
		       has switched threads away from the thread that
		       forked.  In that case, the resume command
		       issued is most likely not applicable to the
		       child, so just warn, and refuse to resume.
		       NOTE(review): the trailing "\n" in this warning
		       text looks contrary to the usual GDB warning()
		       convention -- confirm.  */
		    warning (_("Not resuming: switched threads "
			       "before following fork child.\n"));
		  }

		/* Reset breakpoints in the child as appropriate.  */
		follow_inferior_reset_breakpoints ();
	      }
	    else
	      switch_to_thread (parent);
	  }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
		      "Unexpected pending_follow.kind %d\n",
		      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}
800
801 static void
802 follow_inferior_reset_breakpoints (void)
803 {
804 struct thread_info *tp = inferior_thread ();
805
806 /* Was there a step_resume breakpoint? (There was if the user
807 did a "next" at the fork() call.) If so, explicitly reset its
808 thread number. Cloned step_resume breakpoints are disabled on
809 creation, so enable it here now that it is associated with the
810 correct thread.
811
812 step_resumes are a form of bp that are made to be per-thread.
813 Since we created the step_resume bp when the parent process
814 was being debugged, and now are switching to the child process,
815 from the breakpoint package's viewpoint, that's a switch of
816 "threads". We must update the bp's notion of which thread
817 it is for, or it'll be ignored when it triggers. */
818
819 if (tp->control.step_resume_breakpoint)
820 {
821 breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
822 tp->control.step_resume_breakpoint->loc->enabled = 1;
823 }
824
825 /* Treat exception_resume breakpoints like step_resume breakpoints. */
826 if (tp->control.exception_resume_breakpoint)
827 {
828 breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
829 tp->control.exception_resume_breakpoint->loc->enabled = 1;
830 }
831
832 /* Reinsert all breakpoints in the child. The user may have set
833 breakpoints after catching the fork, in which case those
834 were never set in the child, but only in the parent. This makes
835 sure the inserted breakpoints match the breakpoint list. */
836
837 breakpoint_re_set ();
838 insert_breakpoints ();
839 }
840
841 /* The child has exited or execed: resume threads of the parent the
842 user wanted to be executing. */
843
844 static int
845 proceed_after_vfork_done (struct thread_info *thread,
846 void *arg)
847 {
848 int pid = * (int *) arg;
849
850 if (ptid_get_pid (thread->ptid) == pid
851 && is_running (thread->ptid)
852 && !is_executing (thread->ptid)
853 && !thread->stop_requested
854 && thread->suspend.stop_signal == GDB_SIGNAL_0)
855 {
856 if (debug_infrun)
857 fprintf_unfiltered (gdb_stdlog,
858 "infrun: resuming vfork parent thread %s\n",
859 target_pid_to_str (thread->ptid));
860
861 switch_to_thread (thread->ptid);
862 clear_proceed_status (0);
863 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
864 }
865
866 return 0;
867 }
868
/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  EXEC is nonzero for an exec
   event, zero for an exit event.  No-op unless the current inferior
   is a vfork child still bound to its parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      /* Pid of the parent to resume afterwards, or -1 for none.  */
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  If the user wanted to
	 detach from the parent, now is the time.  */

      if (inf->vfork_parent->pending_detach)
	{
	  struct thread_info *tp;
	  struct cleanup *old_chain;
	  struct program_space *pspace;
	  struct address_space *aspace;

	  /* follow-fork child, detach-on-fork on.  */

	  inf->vfork_parent->pending_detach = 0;

	  if (!exec)
	    {
	      /* If we're handling a child exit, then inferior_ptid
		 points at the inferior's pid, not to a thread.  */
	      old_chain = save_inferior_ptid ();
	      save_current_program_space ();
	      save_current_inferior ();
	    }
	  else
	    old_chain = save_current_space_and_thread ();

	  /* We're letting loose of the parent.  NOTE(review):
	     any_live_thread_of_process can presumably return NULL if
	     the parent has no live threads -- confirm TP cannot be
	     NULL here.  */
	  tp = any_live_thread_of_process (inf->vfork_parent->pid);
	  switch_to_thread (tp->ptid);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module being unaware of this, would
	     likely chose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  aspace = inf->aspace;
	  inf->aspace = NULL;
	  inf->pspace = NULL;

	  if (debug_infrun || info_verbose)
	    {
	      target_terminal_ours_for_output ();

	      if (exec)
		{
		  fprintf_filtered (gdb_stdlog,
				    _("Detaching vfork parent process "
				      "%d after child exec.\n"),
				    inf->vfork_parent->pid);
		}
	      else
		{
		  fprintf_filtered (gdb_stdlog,
				    _("Detaching vfork parent process "
				      "%d after child exit.\n"),
				    inf->vfork_parent->pid);
		}
	    }

	  target_detach (NULL, 0);

	  /* Put it back.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;

	  do_cleanups (old_chain);
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = add_program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = 1;
	  set_current_program_space (inf->pspace);

	  resume_parent = inf->vfork_parent->pid;

	  /* Break the bonds.  */
	  inf->vfork_parent->vfork_child = NULL;
	}
      else
	{
	  struct cleanup *old_chain;
	  struct program_space *pspace;

	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  /* Switch to null_ptid, so that clone_program_space doesn't want
	     to read the selected frame of a dead process.  */
	  old_chain = save_inferior_ptid ();
	  inferior_ptid = null_ptid;

	  /* This inferior is dead, so avoid giving the breakpoints
	     module the option to write through to it (cloning a
	     program space resets breakpoints).  */
	  inf->aspace = NULL;
	  inf->pspace = NULL;
	  pspace = add_program_space (maybe_new_address_space ());
	  set_current_program_space (pspace);
	  inf->removable = 1;
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (pspace, inf->vfork_parent->pspace);
	  inf->pspace = pspace;
	  inf->aspace = pspace->aspace;

	  /* Put back inferior_ptid.  We'll continue mourning this
	     inferior.  */
	  do_cleanups (old_chain);

	  resume_parent = inf->vfork_parent->pid;
	  /* Break the bonds.  */
	  inf->vfork_parent->vfork_child = NULL;
	}

      inf->vfork_parent = NULL;

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  struct cleanup *old_chain = make_cleanup_restore_current_thread ();

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resuming vfork parent process %d\n",
				resume_parent);

	  iterate_over_threads (proceed_after_vfork_done, &resume_parent);

	  do_cleanups (old_chain);
	}
    }
}
1032
/* Enum strings for "set|show follow-exec-mode".  */

/* On exec, create (and switch to) a fresh inferior for the new
   image, keeping the old one around.  */
static const char follow_exec_mode_new[] = "new";
/* On exec, reuse the same inferior for the new image.  */
static const char follow_exec_mode_same[] = "same";
/* NULL-terminated list of the valid mode strings above.  */
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};
1043
/* The currently selected follow-exec mode; one of the
   follow_exec_mode_* strings above.  Starts out as "same".  */
static const char *follow_exec_mode_string = follow_exec_mode_same;
/* Show-hook for "show follow-exec-mode".  */
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"), value);
}
1051
/* Handle an exec event reported for PTID.  EXECD_PATHNAME is the
   filename of the new executable image, and is assumed to be
   non-NULL.  Refreshes GDB's state -- threads, breakpoints, symbol
   table, shared libraries, target description -- to match the
   post-exec process.  */

static void
follow_exec (ptid_t ptid, char *execd_pathname)
{
  struct thread_info *th, *tmp;
  struct inferior *inf = current_inferior ();
  int pid = ptid_get_pid (ptid);

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  ALL_THREADS_SAFE (th, tmp)
    if (ptid_get_pid (th->ptid) == pid && !ptid_equal (th->ptid, ptid))
      delete_thread (th->ptid);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  printf_unfiltered (_("%s is executing new program: %s\n"),
		     target_pid_to_str (inferior_ptid),
		     execd_pathname);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  gdb_flush (gdb_stdout);

  breakpoint_init_inferior (inf_execd);

  if (gdb_sysroot != NULL && *gdb_sysroot != '\0')
    {
      int fd = -1;
      char *name;

      /* A sysroot is in effect; map EXECD_PATHNAME to its location
	 under the sysroot (e.g. "/bin/sh" -> "$sysroot/bin/sh").  We
	 only need the resolved name here, not the file descriptor
	 exec_file_find may have opened.  */
      name = exec_file_find (execd_pathname, &fd);
      if (fd >= 0)
	close (fd);

      /* NOTE(review): assumes exec_file_find returns non-NULL when a
	 sysroot is set -- confirm; strlen (NULL) would crash here
	 otherwise.  */
      execd_pathname = alloca (strlen (name) + 1);
      strcpy (execd_pathname, name);
      xfree (name);
    }

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      struct program_space *pspace;

      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      inf = add_inferior (current_inferior ()->pid);
      pspace = add_program_space (maybe_new_address_space ());
      inf->pspace = pspace;
      inf->aspace = pspace->aspace;

      exit_inferior_num_silent (current_inferior ()->num);

      set_current_inferior (inf);
      set_current_program_space (pspace);
    }
  else
    {
      /* The old description may no longer be fit for the new image.
	 E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
	 old description; we'll read a new one below.  No need to do
	 this on "follow-exec-mode new", as the old inferior stays
	 around (its description is later cleared/refetched on
	 restart).  */
      target_clear_description ();
    }

  gdb_assert (current_program_space == inf->pspace);

  /* That a.out is now the one to use.  */
  exec_file_attach (execd_pathname, 0);

  /* SYMFILE_DEFER_BP_RESET is used as the proper displacement for PIE
     (Position Independent Executable) main symbol file will get applied by
     solib_create_inferior_hook below.  breakpoint_re_set would fail to insert
     the breakpoints with the zero displacement.  */

  symbol_file_add (execd_pathname,
		   (inf->symfile_flags
		    | SYMFILE_MAINLINE | SYMFILE_DEFER_BP_RESET),
		   NULL, 0);

  if ((inf->symfile_flags & SYMFILE_NO_READ) == 0)
    set_initial_language ();

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  solib_create_inferior_hook (0);

  jit_inferior_created_hook ();

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}
1229
/* Info about an instruction that is being stepped over.  Maintained
   by set_step_over_info / clear_step_over_info, below.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  struct address_space *aspace;
  CORE_ADDR address;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).
   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;
1271
1272 /* Record the address of the breakpoint/instruction we're currently
1273 stepping over. */
1274
1275 static void
1276 set_step_over_info (struct address_space *aspace, CORE_ADDR address,
1277 int nonsteppable_watchpoint_p)
1278 {
1279 step_over_info.aspace = aspace;
1280 step_over_info.address = address;
1281 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1282 }
1283
1284 /* Called when we're not longer stepping over a breakpoint / an
1285 instruction, so all breakpoints are free to be (re)inserted. */
1286
1287 static void
1288 clear_step_over_info (void)
1289 {
1290 step_over_info.aspace = NULL;
1291 step_over_info.address = 0;
1292 step_over_info.nonsteppable_watchpoint_p = 0;
1293 }
1294
1295 /* See infrun.h. */
1296
1297 int
1298 stepping_past_instruction_at (struct address_space *aspace,
1299 CORE_ADDR address)
1300 {
1301 return (step_over_info.aspace != NULL
1302 && breakpoint_address_match (aspace, address,
1303 step_over_info.aspace,
1304 step_over_info.address));
1305 }
1306
/* See infrun.h.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  /* Nonzero while the instruction being stepped over triggers a
     nonsteppable watchpoint (see struct step_over_info).  */
  return step_over_info.nonsteppable_watchpoint_p;
}
1314
1315 /* Returns true if step-over info is valid. */
1316
1317 static int
1318 step_over_info_valid_p (void)
1319 {
1320 return (step_over_info.aspace != NULL
1321 || stepping_past_nonsteppable_watchpoint ());
1322 }
1323
1324 \f
1325 /* Displaced stepping. */
1326
1327 /* In non-stop debugging mode, we must take special care to manage
1328 breakpoints properly; in particular, the traditional strategy for
1329 stepping a thread past a breakpoint it has hit is unsuitable.
1330 'Displaced stepping' is a tactic for stepping one thread past a
1331 breakpoint it has hit while ensuring that other threads running
1332 concurrently will hit the breakpoint as they should.
1333
1334 The traditional way to step a thread T off a breakpoint in a
1335 multi-threaded program in all-stop mode is as follows:
1336
1337 a0) Initially, all threads are stopped, and breakpoints are not
1338 inserted.
1339 a1) We single-step T, leaving breakpoints uninserted.
1340 a2) We insert breakpoints, and resume all threads.
1341
1342 In non-stop debugging, however, this strategy is unsuitable: we
1343 don't want to have to stop all threads in the system in order to
1344 continue or step T past a breakpoint. Instead, we use displaced
1345 stepping:
1346
1347 n0) Initially, T is stopped, other threads are running, and
1348 breakpoints are inserted.
1349 n1) We copy the instruction "under" the breakpoint to a separate
1350 location, outside the main code stream, making any adjustments
1351 to the instruction, register, and memory state as directed by
1352 T's architecture.
1353 n2) We single-step T over the instruction at its new location.
1354 n3) We adjust the resulting register and memory state as directed
1355 by T's architecture. This includes resetting T's PC to point
1356 back into the main instruction stream.
1357 n4) We resume T.
1358
1359 This approach depends on the following gdbarch methods:
1360
1361 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1362 indicate where to copy the instruction, and how much space must
1363 be reserved there. We use these in step n1.
1364
   - gdbarch_displaced_step_copy_insn copies an instruction to a new
1366 address, and makes any necessary adjustments to the instruction,
1367 register contents, and memory. We use this in step n1.
1368
1369 - gdbarch_displaced_step_fixup adjusts registers and memory after
     we have successfully single-stepped the instruction, to yield the
1371 same effect the instruction would have had if we had executed it
1372 at its original address. We use this in step n3.
1373
1374 - gdbarch_displaced_step_free_closure provides cleanup.
1375
1376 The gdbarch_displaced_step_copy_insn and
1377 gdbarch_displaced_step_fixup functions must be written so that
1378 copying an instruction with gdbarch_displaced_step_copy_insn,
1379 single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
1381 thread's memory and registers as stepping the instruction in place
1382 would have. Exactly which responsibilities fall to the copy and
1383 which fall to the fixup is up to the author of those functions.
1384
1385 See the comments in gdbarch.sh for details.
1386
1387 Note that displaced stepping and software single-step cannot
1388 currently be used in combination, although with some care I think
1389 they could be made to. Software single-step works by placing
1390 breakpoints on all possible subsequent instructions; if the
1391 displaced instruction is a PC-relative jump, those breakpoints
1392 could fall in very strange places --- on pages that aren't
1393 executable, or at addresses that are not proper instruction
1394 boundaries. (We do generally let other threads run while we wait
1395 to hit the software single-step breakpoint, and they might
1396 encounter such a corrupted instruction.) One way to work around
1397 this would be to have gdbarch_displaced_step_copy_insn fully
1398 simulate the effect of PC-relative instructions (and return NULL)
1399 on architectures that use software single-stepping.
1400
1401 In non-stop mode, we can have independent and simultaneous step
1402 requests, so more than one thread may need to simultaneously step
1403 over a breakpoint. The current implementation assumes there is
1404 only one scratch space per process. In this case, we have to
1405 serialize access to the scratch space. If thread A wants to step
1406 over a breakpoint, but we are currently waiting for some other
1407 thread to complete a displaced step, we leave thread A stopped and
1408 place it in the displaced_step_request_queue. Whenever a displaced
1409 step finishes, we pick the next thread in the queue and start a new
1410 displaced step operation on it. See displaced_step_prepare and
1411 displaced_step_fixup for details. */
1412
/* A queued displaced-stepping request; see
   displaced_step_inferior_state::step_request_queue below.  */
struct displaced_step_request
{
  /* The thread waiting to do its displaced step.  */
  ptid_t ptid;
  /* Next request in the queue, or NULL.  */
  struct displaced_step_request *next;
};
1418
/* Per-inferior displaced stepping state.  */
struct displaced_step_inferior_state
{
  /* Pointer to next in linked list.  */
  struct displaced_step_inferior_state *next;

  /* The process this displaced step state refers to.  */
  int pid;

  /* A queue of pending displaced stepping requests.  One entry per
     thread that needs to do a displaced step.  */
  struct displaced_step_request *step_request_queue;

  /* If this is not null_ptid, this is the thread carrying out a
     displaced single-step in process PID.  This thread's state will
     require fixing up once it has completed its step.  */
  ptid_t step_ptid;

  /* The architecture the thread had when we stepped it.  */
  struct gdbarch *step_gdbarch;

  /* The closure provided by gdbarch_displaced_step_copy_insn, to be
     used for post-step cleanup.  */
  struct displaced_step_closure *step_closure;

  /* The address of the original instruction, and the copy we
     made.  */
  CORE_ADDR step_original, step_copy;

  /* Saved contents of copy area.  */
  gdb_byte *step_saved_copy;
};
1451
/* The list of states of processes involved in displaced stepping
   presently.  Singly-linked; one node per process (see
   add_displaced_stepping_state).  */
static struct displaced_step_inferior_state *displaced_step_inferior_states;
1455
1456 /* Get the displaced stepping state of process PID. */
1457
1458 static struct displaced_step_inferior_state *
1459 get_displaced_stepping_state (int pid)
1460 {
1461 struct displaced_step_inferior_state *state;
1462
1463 for (state = displaced_step_inferior_states;
1464 state != NULL;
1465 state = state->next)
1466 if (state->pid == pid)
1467 return state;
1468
1469 return NULL;
1470 }
1471
1472 /* Return true if process PID has a thread doing a displaced step. */
1473
1474 static int
1475 displaced_step_in_progress (int pid)
1476 {
1477 struct displaced_step_inferior_state *displaced;
1478
1479 displaced = get_displaced_stepping_state (pid);
1480 if (displaced != NULL && !ptid_equal (displaced->step_ptid, null_ptid))
1481 return 1;
1482
1483 return 0;
1484 }
1485
1486 /* Add a new displaced stepping state for process PID to the displaced
1487 stepping state list, or return a pointer to an already existing
1488 entry, if it already exists. Never returns NULL. */
1489
1490 static struct displaced_step_inferior_state *
1491 add_displaced_stepping_state (int pid)
1492 {
1493 struct displaced_step_inferior_state *state;
1494
1495 for (state = displaced_step_inferior_states;
1496 state != NULL;
1497 state = state->next)
1498 if (state->pid == pid)
1499 return state;
1500
1501 state = xcalloc (1, sizeof (*state));
1502 state->pid = pid;
1503 state->next = displaced_step_inferior_states;
1504 displaced_step_inferior_states = state;
1505
1506 return state;
1507 }
1508
1509 /* If inferior is in displaced stepping, and ADDR equals to starting address
1510 of copy area, return corresponding displaced_step_closure. Otherwise,
1511 return NULL. */
1512
1513 struct displaced_step_closure*
1514 get_displaced_step_closure_by_addr (CORE_ADDR addr)
1515 {
1516 struct displaced_step_inferior_state *displaced
1517 = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1518
1519 /* If checking the mode of displaced instruction in copy area. */
1520 if (displaced && !ptid_equal (displaced->step_ptid, null_ptid)
1521 && (displaced->step_copy == addr))
1522 return displaced->step_closure;
1523
1524 return NULL;
1525 }
1526
1527 /* Remove the displaced stepping state of process PID. */
1528
1529 static void
1530 remove_displaced_stepping_state (int pid)
1531 {
1532 struct displaced_step_inferior_state *it, **prev_next_p;
1533
1534 gdb_assert (pid != 0);
1535
1536 it = displaced_step_inferior_states;
1537 prev_next_p = &displaced_step_inferior_states;
1538 while (it)
1539 {
1540 if (it->pid == pid)
1541 {
1542 *prev_next_p = it->next;
1543 xfree (it);
1544 return;
1545 }
1546
1547 prev_next_p = &it->next;
1548 it = *prev_next_p;
1549 }
1550 }
1551
/* Discard any displaced stepping state of inferior INF's process,
   when that inferior goes away.  */
static void
infrun_inferior_exit (struct inferior *inf)
{
  remove_displaced_stepping_state (inf->pid);
}
1557
/* If ON, and the architecture supports it, GDB will use displaced
   stepping to step over breakpoints.  If OFF, or if the architecture
   doesn't support it, GDB will instead use the traditional
   hold-and-step approach.  If AUTO (which is the default), GDB will
   decide which technique to use to step over breakpoints depending on
   which of all-stop or non-stop mode is active --- displaced stepping
   in non-stop mode; hold-and-step in all-stop mode.  */

/* See use_displaced_stepping, below, which consults this.  */
static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1567
1568 static void
1569 show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
1570 struct cmd_list_element *c,
1571 const char *value)
1572 {
1573 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
1574 fprintf_filtered (file,
1575 _("Debugger's willingness to use displaced stepping "
1576 "to step over breakpoints is %s (currently %s).\n"),
1577 value, non_stop ? "on" : "off");
1578 else
1579 fprintf_filtered (file,
1580 _("Debugger's willingness to use displaced stepping "
1581 "to step over breakpoints is %s.\n"), value);
1582 }
1583
1584 /* Return non-zero if displaced stepping can/should be used to step
1585 over breakpoints. */
1586
1587 static int
1588 use_displaced_stepping (struct gdbarch *gdbarch)
1589 {
1590 return (((can_use_displaced_stepping == AUTO_BOOLEAN_AUTO && non_stop)
1591 || can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1592 && gdbarch_displaced_step_copy_insn_p (gdbarch)
1593 && find_record_target () == NULL);
1594 }
1595
1596 /* Clean out any stray displaced stepping state. */
1597 static void
1598 displaced_step_clear (struct displaced_step_inferior_state *displaced)
1599 {
1600 /* Indicate that there is no cleanup pending. */
1601 displaced->step_ptid = null_ptid;
1602
1603 if (displaced->step_closure)
1604 {
1605 gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
1606 displaced->step_closure);
1607 displaced->step_closure = NULL;
1608 }
1609 }
1610
/* Cleanup-callback wrapper around displaced_step_clear; ARG is the
   struct displaced_step_inferior_state to clear.  */

static void
displaced_step_clear_cleanup (void *arg)
{
  displaced_step_clear ((struct displaced_step_inferior_state *) arg);
}
1618
1619 /* Dump LEN bytes at BUF in hex to FILE, followed by a newline. */
1620 void
1621 displaced_step_dump_bytes (struct ui_file *file,
1622 const gdb_byte *buf,
1623 size_t len)
1624 {
1625 int i;
1626
1627 for (i = 0; i < len; i++)
1628 fprintf_unfiltered (file, "%02x ", buf[i]);
1629 fputs_unfiltered ("\n", file);
1630 }
1631
/* Prepare to single-step, using displaced stepping.

   Note that we cannot use displaced stepping when we have a signal to
   deliver.  If we have a signal to deliver and an instruction to step
   over, then after the step, there will be no indication from the
   target whether the thread entered a signal handler or ignored the
   signal and stepped over the instruction successfully --- both cases
   result in a simple SIGTRAP.  In the first case we mustn't do a
   fixup, and in the second case we must --- but we can't tell which.
   Comments in the code for 'random signals' in handle_inferior_event
   explain how we handle this case instead.

   Returns 1 if preparing was successful -- this thread is going to be
   stepped now; or 0 if displaced stepping this thread got queued.  */
static int
displaced_step_prepare (ptid_t ptid)
{
  struct cleanup *old_cleanups, *ignore_cleanups;
  struct thread_info *tp = find_thread_ptid (ptid);
  struct regcache *regcache = get_thread_regcache (ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original, copy;
  ULONGEST len;
  struct displaced_step_closure *closure;
  struct displaced_step_inferior_state *displaced;
  int status;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We have to displaced step one thread at a time, as we only have
     access to a single scratch space per inferior.  */

  displaced = add_displaced_stepping_state (ptid_get_pid (ptid));

  if (!ptid_equal (displaced->step_ptid, null_ptid))
    {
      /* Already waiting for a displaced step to finish.  Defer this
	 request and place in queue.  */
      struct displaced_step_request *req, *new_req;

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: defering step of %s\n",
			    target_pid_to_str (ptid));

      new_req = xmalloc (sizeof (*new_req));
      new_req->ptid = ptid;
      new_req->next = NULL;

      /* Append at the tail, so requests are serviced FIFO.  */
      if (displaced->step_request_queue)
	{
	  for (req = displaced->step_request_queue;
	       req && req->next;
	       req = req->next)
	    ;
	  req->next = new_req;
	}
      else
	displaced->step_request_queue = new_req;

      return 0;
    }
  else
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: stepping %s now\n",
			    target_pid_to_str (ptid));
    }

  displaced_step_clear (displaced);

  /* Temporarily make PTID the current thread, for the memory
     accesses below.  */
  old_cleanups = save_inferior_ptid ();
  inferior_ptid = ptid;

  original = regcache_read_pc (regcache);

  copy = gdbarch_displaced_step_location (gdbarch);
  len = gdbarch_max_insn_length (gdbarch);

  /* Save the original contents of the copy area.  */
  displaced->step_saved_copy = xmalloc (len);
  ignore_cleanups = make_cleanup (free_current_contents,
				  &displaced->step_saved_copy);
  status = target_read_memory (copy, displaced->step_saved_copy, len);
  if (status != 0)
    throw_error (MEMORY_ERROR,
		 _("Error accessing memory address %s (%s) for "
		   "displaced-stepping scratch space."),
		 paddress (gdbarch, copy), safe_strerror (status));
  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
			  paddress (gdbarch, copy));
      displaced_step_dump_bytes (gdb_stdlog,
				 displaced->step_saved_copy,
				 len);
    };

  /* Copy the instruction into the scratch area, letting the
     architecture adjust instruction/register/memory state.  */
  closure = gdbarch_displaced_step_copy_insn (gdbarch,
					      original, copy, regcache);

  /* We don't support the fully-simulated case at present.  */
  gdb_assert (closure);

  /* Save the information we need to fix things up if the step
     succeeds.  */
  displaced->step_ptid = ptid;
  displaced->step_gdbarch = gdbarch;
  displaced->step_closure = closure;
  displaced->step_original = original;
  displaced->step_copy = copy;

  make_cleanup (displaced_step_clear_cleanup, displaced);

  /* Resume execution at the copy.  */
  regcache_write_pc (regcache, copy);

  /* The saved copy buffer is now owned by the state object; don't
     free it via IGNORE_CLEANUPS.  */
  discard_cleanups (ignore_cleanups);

  do_cleanups (old_cleanups);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
			paddress (gdbarch, copy));

  return 1;
}
1768
1769 static void
1770 write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr,
1771 const gdb_byte *myaddr, int len)
1772 {
1773 struct cleanup *ptid_cleanup = save_inferior_ptid ();
1774
1775 inferior_ptid = ptid;
1776 write_memory (memaddr, myaddr, len);
1777 do_cleanups (ptid_cleanup);
1778 }
1779
1780 /* Restore the contents of the copy area for thread PTID. */
1781
1782 static void
1783 displaced_step_restore (struct displaced_step_inferior_state *displaced,
1784 ptid_t ptid)
1785 {
1786 ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);
1787
1788 write_memory_ptid (ptid, displaced->step_copy,
1789 displaced->step_saved_copy, len);
1790 if (debug_displaced)
1791 fprintf_unfiltered (gdb_stdlog, "displaced: restored %s %s\n",
1792 target_pid_to_str (ptid),
1793 paddress (displaced->step_gdbarch,
1794 displaced->step_copy));
1795 }
1796
/* Finish the displaced step of thread EVENT_PTID, which stopped with
   SIGNAL: restore the scratch area, apply the architecture's fixup
   (or just relocate the PC if the step didn't complete), and then
   start the next queued displaced-stepping request, if any.  No-op if
   EVENT_PTID wasn't the thread being displaced-stepped.  */

static void
displaced_step_fixup (ptid_t event_ptid, enum gdb_signal signal)
{
  struct cleanup *old_cleanups;
  struct displaced_step_inferior_state *displaced
    = get_displaced_stepping_state (ptid_get_pid (event_ptid));

  /* Was any thread of this process doing a displaced step?  */
  if (displaced == NULL)
    return;

  /* Was this event for the pid we displaced?  */
  if (ptid_equal (displaced->step_ptid, null_ptid)
      || ! ptid_equal (displaced->step_ptid, event_ptid))
    return;

  old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);

  displaced_step_restore (displaced, displaced->step_ptid);

  /* Fixup may need to read memory/registers.  Switch to the thread
     that we're fixing up.  Also, target_stopped_by_watchpoint checks
     the current thread.  */
  switch_to_thread (event_ptid);

  /* Did the instruction complete successfully?  */
  if (signal == GDB_SIGNAL_TRAP
      && !(target_stopped_by_watchpoint ()
	   && (gdbarch_have_nonsteppable_watchpoint (displaced->step_gdbarch)
	       || target_have_steppable_watchpoint)))
    {
      /* Fix up the resulting state.  */
      gdbarch_displaced_step_fixup (displaced->step_gdbarch,
				    displaced->step_closure,
				    displaced->step_original,
				    displaced->step_copy,
				    get_thread_regcache (displaced->step_ptid));
    }
  else
    {
      /* Since the instruction didn't complete, all we can do is
	 relocate the PC.  */
      struct regcache *regcache = get_thread_regcache (event_ptid);
      CORE_ADDR pc = regcache_read_pc (regcache);

      pc = displaced->step_original + (pc - displaced->step_copy);
      regcache_write_pc (regcache, pc);
    }

  do_cleanups (old_cleanups);

  displaced->step_ptid = null_ptid;

  /* Are there any pending displaced stepping requests?  If so, run
     one now.  Leave the state object around, since we're likely to
     need it again soon.  */
  while (displaced->step_request_queue)
    {
      struct displaced_step_request *head;
      ptid_t ptid;
      struct regcache *regcache;
      struct gdbarch *gdbarch;
      CORE_ADDR actual_pc;
      struct address_space *aspace;

      /* Pop the request at the head of the queue.  */
      head = displaced->step_request_queue;
      ptid = head->ptid;
      displaced->step_request_queue = head->next;
      xfree (head);

      context_switch (ptid);

      regcache = get_thread_regcache (ptid);
      actual_pc = regcache_read_pc (regcache);
      aspace = get_regcache_aspace (regcache);
      gdbarch = get_regcache_arch (regcache);

      if (breakpoint_here_p (aspace, actual_pc))
	{
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: stepping queued %s now\n",
				target_pid_to_str (ptid));

	  displaced_step_prepare (ptid);

	  if (debug_displaced)
	    {
	      CORE_ADDR actual_pc = regcache_read_pc (regcache);
	      gdb_byte buf[4];

	      fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
				  paddress (gdbarch, actual_pc));
	      read_memory (actual_pc, buf, sizeof (buf));
	      displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
	    }

	  if (gdbarch_displaced_step_hw_singlestep (gdbarch,
						    displaced->step_closure))
	    target_resume (ptid, 1, GDB_SIGNAL_0);
	  else
	    target_resume (ptid, 0, GDB_SIGNAL_0);

	  /* Done, we're stepping a thread.  */
	  break;
	}
      else
	{
	  int step;
	  struct thread_info *tp = inferior_thread ();

	  /* The breakpoint we were sitting under has since been
	     removed.  */
	  tp->control.trap_expected = 0;

	  /* Go back to what we were trying to do.  */
	  step = currently_stepping (tp);

	  if (step)
	    step = maybe_software_singlestep (gdbarch, actual_pc);

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: breakpoint is gone: %s, step(%d)\n",
				target_pid_to_str (tp->ptid), step);

	  target_resume (ptid, step, GDB_SIGNAL_0);
	  tp->suspend.stop_signal = GDB_SIGNAL_0;

	  /* This request was discarded.  See if there's any other
	     thread waiting for its turn.  */
	}
    }
}
1931
1932 /* Update global variables holding ptids to hold NEW_PTID if they were
1933 holding OLD_PTID. */
1934 static void
1935 infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
1936 {
1937 struct displaced_step_request *it;
1938 struct displaced_step_inferior_state *displaced;
1939
1940 if (ptid_equal (inferior_ptid, old_ptid))
1941 inferior_ptid = new_ptid;
1942
1943 for (displaced = displaced_step_inferior_states;
1944 displaced;
1945 displaced = displaced->next)
1946 {
1947 if (ptid_equal (displaced->step_ptid, old_ptid))
1948 displaced->step_ptid = new_ptid;
1949
1950 for (it = displaced->step_request_queue; it; it = it->next)
1951 if (ptid_equal (it->ptid, old_ptid))
1952 it->ptid = new_ptid;
1953 }
1954 }
1955
1956 \f
1957 /* Resuming. */
1958
/* Things to clean up if we QUIT out of resume ().  */
static void
resume_cleanups (void *ignore)
{
  /* If a thread is selected, discard any single-step breakpoints it
     still owns; the interrupted resume will never consume them.  */
  if (!ptid_equal (inferior_ptid, null_ptid))
    delete_single_step_breakpoints (inferior_thread ());

  /* Bring GDB back to a consistent "stopped" state (terminal
     ownership, frame caches, stop notifications).  */
  normal_stop ();
}
1968
/* Possible values of the "set scheduler-locking" user setting.  The
   current mode is compared by pointer identity against these
   strings, as is usual for add_setshow_enum_cmd settings.  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  NULL
};
/* The currently selected scheduler-locking mode.  */
static const char *scheduler_mode = schedlock_off;
/* Implementation of "show scheduler-locking": print the current
   scheduler-locking mode (VALUE is its string form).  */
static void
show_scheduler_mode (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for locking scheduler "
		      "during execution is \"%s\".\n"),
		    value);
}
1988
1989 static void
1990 set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1991 {
1992 if (!target_can_lock_scheduler)
1993 {
1994 scheduler_mode = schedlock_off;
1995 error (_("Target '%s' cannot support this command."), target_shortname);
1996 }
1997 }
1998
/* True if execution commands resume all threads of all processes by
   default; otherwise, resume only threads of the current inferior
   process.  Controlled by the "set schedule-multiple" command.  */
int sched_multi = 0;
2003
2004 /* Try to setup for software single stepping over the specified location.
2005 Return 1 if target_resume() should use hardware single step.
2006
2007 GDBARCH the current gdbarch.
2008 PC the location to step over. */
2009
2010 static int
2011 maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
2012 {
2013 int hw_step = 1;
2014
2015 if (execution_direction == EXEC_FORWARD
2016 && gdbarch_software_single_step_p (gdbarch)
2017 && gdbarch_software_single_step (gdbarch, get_current_frame ()))
2018 {
2019 hw_step = 0;
2020 }
2021 return hw_step;
2022 }
2023
2024 /* See infrun.h. */
2025
2026 ptid_t
2027 user_visible_resume_ptid (int step)
2028 {
2029 ptid_t resume_ptid;
2030
2031 if (non_stop)
2032 {
2033 /* With non-stop mode on, threads are always handled
2034 individually. */
2035 resume_ptid = inferior_ptid;
2036 }
2037 else if ((scheduler_mode == schedlock_on)
2038 || (scheduler_mode == schedlock_step && step))
2039 {
2040 /* User-settable 'scheduler' mode requires solo thread
2041 resume. */
2042 resume_ptid = inferior_ptid;
2043 }
2044 else if (!sched_multi && target_supports_multi_process ())
2045 {
2046 /* Resume all threads of the current process (and none of other
2047 processes). */
2048 resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
2049 }
2050 else
2051 {
2052 /* Resume all threads of all processes. */
2053 resume_ptid = RESUME_ALL;
2054 }
2055
2056 return resume_ptid;
2057 }
2058
2059 /* Wrapper for target_resume, that handles infrun-specific
2060 bookkeeping. */
2061
static void
do_target_resume (ptid_t resume_ptid, int step, enum gdb_signal sig)
{
  struct thread_info *tp = inferior_thread ();

  /* Install inferior's terminal modes.  */
  target_terminal_inferior ();

  /* Avoid confusing the next resume, if the next stop/resume
     happens to apply to another thread.  */
  tp->suspend.stop_signal = GDB_SIGNAL_0;

  /* Advise target which signals may be handled silently.

     If we have removed breakpoints because we are stepping over one
     in-line (in any thread), we need to receive all signals to avoid
     accidentally skipping a breakpoint during execution of a signal
     handler.

     Likewise if we're displaced stepping, otherwise a trap for a
     breakpoint in a signal handler might be confused with the
     displaced step finishing.  We don't make the displaced_step_fixup
     step distinguish the cases instead, because:

     - a backtrace while stopped in the signal handler would show the
       scratch pad as frame older than the signal handler, instead of
       the real mainline code.

     - when the thread is later resumed, the signal handler would
       return to the scratch pad area, which would no longer be
       valid.  */
  if (step_over_info_valid_p ()
      || displaced_step_in_progress (ptid_get_pid (tp->ptid)))
    /* Pass no signals silently: GDB must see them all.  */
    target_pass_signals (0, NULL);
  else
    /* Let the target deliver directly whatever the user configured
       to be passed (see "handle ... pass").  */
    target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);

  target_resume (resume_ptid, step, sig);
}
2101
2102 /* Resume the inferior, but allow a QUIT. This is useful if the user
2103 wants to interrupt some lengthy single-stepping operation
2104 (for child processes, the SIGINT goes to the inferior, and so
2105 we get a SIGINT random_signal, but for remote debugging and perhaps
2106 other targets, that's not true).
2107
2108 SIG is the signal to give the inferior (zero for none). */
void
resume (enum gdb_signal sig)
{
  struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct thread_info *tp = inferior_thread ();
  CORE_ADDR pc = regcache_read_pc (regcache);
  struct address_space *aspace = get_regcache_aspace (regcache);
  ptid_t resume_ptid;
  /* This represents the user's step vs continue request.  When
     deciding whether "set scheduler-locking step" applies, it's the
     user's intention that counts.  */
  const int user_step = tp->control.stepping_command;
  /* This represents what we'll actually request the target to do.
     This can decay from a step to a continue, if e.g., we need to
     implement single-stepping with breakpoints (software
     single-step).  */
  int step;

  tp->stepped_breakpoint = 0;

  QUIT;

  /* Depends on stepped_breakpoint.  */
  step = currently_stepping (tp);

  if (current_inferior ()->waiting_for_vfork_done)
    {
      /* Don't try to single-step a vfork parent that is waiting for
	 the child to get out of the shared memory region (by exec'ing
	 or exiting).  This is particularly important on software
	 single-step archs, as the child process would trip on the
	 software single step breakpoint inserted for the parent
	 process.  Since the parent will not actually execute any
	 instruction until the child is out of the shared region (such
	 are vfork's semantics), it is safe to simply continue it.
	 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
	 the parent, and tell it to `keep_going', which automatically
	 re-sets it stepping.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: resume : clear step\n");
      step = 0;
    }

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: resume (step=%d, signal=%s), "
			"trap_expected=%d, current thread [%s] at %s\n",
			step, gdb_signal_to_symbol_string (sig),
			tp->control.trap_expected,
			target_pid_to_str (inferior_ptid),
			paddress (gdbarch, pc));

  /* Normally, by the time we reach `resume', the breakpoints are either
     removed or inserted, as appropriate.  The exception is if we're sitting
     at a permanent breakpoint; we need to step over it, but permanent
     breakpoints can't be removed.  So we have to test for it here.  */
  if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
    {
      if (sig != GDB_SIGNAL_0)
	{
	  /* We have a signal to pass to the inferior.  The resume
	     may, or may not take us to the signal handler.  If this
	     is a step, we'll need to stop in the signal handler, if
	     there's one, (if the target supports stepping into
	     handlers), or in the next mainline instruction, if
	     there's no handler.  If this is a continue, we need to be
	     sure to run the handler with all breakpoints inserted.
	     In all cases, set a breakpoint at the current address
	     (where the handler returns to), and once that breakpoint
	     is hit, resume skipping the permanent breakpoint.  If
	     that breakpoint isn't hit, then we've stepped into the
	     signal handler (or hit some other event).  We'll delete
	     the step-resume breakpoint then.  */

	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resume: skipping permanent breakpoint, "
				"deliver signal first\n");

	  clear_step_over_info ();
	  tp->control.trap_expected = 0;

	  if (tp->control.step_resume_breakpoint == NULL)
	    {
	      /* Set a "high-priority" step-resume, as we don't want
		 user breakpoints at PC to trigger (again) when this
		 hits.  */
	      insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	      gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);

	      tp->step_after_step_resume_breakpoint = step;
	    }

	  insert_breakpoints ();
	}
      else
	{
	  /* There's no signal to pass, we can go ahead and skip the
	     permanent breakpoint manually.  */
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: resume: skipping permanent breakpoint\n");
	  gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
	  /* Update pc to reflect the new address from which we will
	     execute instructions.  */
	  pc = regcache_read_pc (regcache);

	  if (step)
	    {
	      /* We've already advanced the PC, so the stepping part
		 is done.  Now we need to arrange for a trap to be
		 reported to handle_inferior_event.  Set a breakpoint
		 at the current PC, and run to it.  Don't update
		 prev_pc, because if we end in
		 switch_back_to_stepped_thread, we want the "expected
		 thread advanced also" branch to be taken.  IOW, we
		 don't want this thread to step further from PC
		 (overstep).  */
	      gdb_assert (!step_over_info_valid_p ());
	      insert_single_step_breakpoint (gdbarch, aspace, pc);
	      insert_breakpoints ();

	      resume_ptid = user_visible_resume_ptid (user_step);
	      do_target_resume (resume_ptid, 0, GDB_SIGNAL_0);
	      discard_cleanups (old_cleanups);
	      return;
	    }
	}
    }

  /* If we have a breakpoint to step over, make sure to do a single
     step only.  Same if we have software watchpoints.  */
  if (tp->control.trap_expected || bpstat_should_step ())
    tp->control.may_range_step = 0;

  /* If enabled, step over breakpoints by executing a copy of the
     instruction at a different address.

     We can't use displaced stepping when we have a signal to deliver;
     the comments for displaced_step_prepare explain why.  The
     comments in the handle_inferior event for dealing with 'random
     signals' explain what we do instead.

     We can't use displaced stepping when we are waiting for vfork_done
     event, displaced stepping breaks the vfork child similarly as single
     step software breakpoint.  */
  if (use_displaced_stepping (gdbarch)
      && tp->control.trap_expected
      && !step_over_info_valid_p ()
      && sig == GDB_SIGNAL_0
      && !current_inferior ()->waiting_for_vfork_done)
    {
      struct displaced_step_inferior_state *displaced;

      if (!displaced_step_prepare (inferior_ptid))
	{
	  /* Got placed in displaced stepping queue.  Will be resumed
	     later when all the currently queued displaced stepping
	     requests finish.  The thread is not executing at this
	     point, and the call to set_executing will be made later.
	     But we need to call set_running here, since from the
	     user/frontend's point of view, threads were set running.
	     Unless we're calling an inferior function, as in that
	     case we pretend the inferior doesn't run at all.  */
	  if (!tp->control.in_infcall)
	    set_running (user_visible_resume_ptid (user_step), 1);
	  discard_cleanups (old_cleanups);
	  return;
	}

      /* Update pc to reflect the new address from which we will execute
	 instructions due to displaced stepping.  */
      pc = regcache_read_pc (get_thread_regcache (inferior_ptid));

      displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
      step = gdbarch_displaced_step_hw_singlestep (gdbarch,
						   displaced->step_closure);
    }

  /* Do we need to do it the hard way, w/temp breakpoints?  */
  else if (step)
    step = maybe_software_singlestep (gdbarch, pc);

  /* Currently, our software single-step implementation leads to different
     results than hardware single-stepping in one situation: when stepping
     into delivering a signal which has an associated signal handler,
     hardware single-step will stop at the first instruction of the handler,
     while software single-step will simply skip execution of the handler.

     For now, this difference in behavior is accepted since there is no
     easy way to actually implement single-stepping into a signal handler
     without kernel support.

     However, there is one scenario where this difference leads to follow-on
     problems: if we're stepping off a breakpoint by removing all breakpoints
     and then single-stepping.  In this case, the software single-step
     behavior means that even if there is a *breakpoint* in the signal
     handler, GDB still would not stop.

     Fortunately, we can at least fix this particular issue.  We detect
     here the case where we are about to deliver a signal while software
     single-stepping with breakpoints removed.  In this situation, we
     revert the decisions to remove all breakpoints and insert single-
     step breakpoints, and instead we install a step-resume breakpoint
     at the current address, deliver the signal without stepping, and
     once we arrive back at the step-resume breakpoint, actually step
     over the breakpoint we originally wanted to step over.  */
  if (thread_has_single_step_breakpoints_set (tp)
      && sig != GDB_SIGNAL_0
      && step_over_info_valid_p ())
    {
      /* If we have nested signals or a pending signal is delivered
	 immediately after a handler returns, we might already have
	 a step-resume breakpoint set on the earlier handler.  We cannot
	 set another step-resume breakpoint; just continue on until the
	 original breakpoint is hit.  */
      if (tp->control.step_resume_breakpoint == NULL)
	{
	  insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	  tp->step_after_step_resume_breakpoint = 1;
	}

      delete_single_step_breakpoints (tp);

      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_breakpoints ();
    }

  /* If STEP is set, it's a request to use hardware stepping
     facilities.  But in that case, we should never
     use singlestep breakpoint.  */
  gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));

  /* Decide the set of threads to ask the target to resume.  Start
     by assuming everything will be resumed, then narrow the set
     by applying increasingly restricting conditions.  */
  resume_ptid = user_visible_resume_ptid (user_step);

  /* Even if RESUME_PTID is a wildcard, and we end up resuming less
     (e.g., we might need to step over a breakpoint), from the
     user/frontend's point of view, all threads in RESUME_PTID are now
     running.  Unless we're calling an inferior function, as in that
     case we pretend the inferior doesn't run at all.  */
  if (!tp->control.in_infcall)
    set_running (resume_ptid, 1);

  /* Maybe resume a single thread after all.  */
  if ((step || thread_has_single_step_breakpoints_set (tp))
      && tp->control.trap_expected)
    {
      /* We're allowing a thread to run past a breakpoint it has
	 hit, by single-stepping the thread with the breakpoint
	 removed.  In which case, we need to single-step only this
	 thread, and keep others stopped, as they can miss this
	 breakpoint if allowed to run.  */
      resume_ptid = inferior_ptid;
    }

  if (execution_direction != EXEC_REVERSE
      && step && breakpoint_inserted_here_p (aspace, pc))
    {
      /* The only case we currently need to step a breakpoint
	 instruction is when we have a signal to deliver.  See
	 handle_signal_stop where we handle random signals that could
	 take us out of the stepping range.  Normally, in that
	 case we end up continuing (instead of stepping) over the
	 signal handler with a breakpoint at PC, but there are cases
	 where we should _always_ single-step, even if we have a
	 step-resume breakpoint, like when a software watchpoint is
	 set.  Assuming single-stepping and delivering a signal at the
	 same time would take us to the signal handler, then we could
	 have removed the breakpoint at PC to step over it.  However,
	 some hardware step targets (like e.g., Mac OS) can't step
	 into signal handlers, and for those, we need to leave the
	 breakpoint at PC inserted, as otherwise if the handler
	 recurses and executes PC again, it'll miss the breakpoint.
	 So we leave the breakpoint inserted anyway, but we need to
	 record that we tried to step a breakpoint instruction, so
	 that adjust_pc_after_break doesn't end up confused.  */
      gdb_assert (sig != GDB_SIGNAL_0);

      tp->stepped_breakpoint = 1;

      /* Most targets can step a breakpoint instruction, thus
	 executing it normally.  But if this one cannot, just
	 continue and we will hit it anyway.  */
      if (gdbarch_cannot_step_breakpoint (gdbarch))
	step = 0;
    }

  if (debug_displaced
      && use_displaced_stepping (gdbarch)
      && tp->control.trap_expected
      && !step_over_info_valid_p ())
    {
      /* Dump the first few bytes of the displaced-step scratch copy
	 we're about to execute, for debugging purposes.  */
      struct regcache *resume_regcache = get_thread_regcache (tp->ptid);
      struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
      CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
      gdb_byte buf[4];

      fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
			  paddress (resume_gdbarch, actual_pc));
      read_memory (actual_pc, buf, sizeof (buf));
      displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
    }

  if (tp->control.may_range_step)
    {
      /* If we're resuming a thread with the PC out of the step
	 range, then we're doing some nested/finer run control
	 operation, like stepping the thread out of the dynamic
	 linker or the displaced stepping scratch pad.  We
	 shouldn't have allowed a range step then.  */
      gdb_assert (pc_in_thread_step_range (pc, tp));
    }

  do_target_resume (resume_ptid, step, sig);
  discard_cleanups (old_cleanups);
}
2433 \f
2434 /* Proceeding. */
2435
2436 /* Clear out all variables saying what to do when inferior is continued.
2437 First do this, then set the ones you want, then call `proceed'. */
2438
/* Reset TP's per-thread run-control state in preparation for a new
   `proceed' — see clear_proceed_status.  */
static void
clear_proceed_status_thread (struct thread_info *tp)
{
  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: clear_proceed_status_thread (%s)\n",
			target_pid_to_str (tp->ptid));

  /* If this signal should not be seen by program, give it zero.
     Used for debugging signals.  */
  if (!signal_pass_state (tp->suspend.stop_signal))
    tp->suspend.stop_signal = GDB_SIGNAL_0;

  /* Clear stepping/trap bookkeeping from the previous command.  */
  tp->control.trap_expected = 0;
  tp->control.step_range_start = 0;
  tp->control.step_range_end = 0;
  tp->control.may_range_step = 0;
  tp->control.step_frame_id = null_frame_id;
  tp->control.step_stack_frame_id = null_frame_id;
  tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
  tp->control.step_start_function = NULL;
  tp->stop_requested = 0;

  tp->control.stop_step = 0;

  tp->control.proceed_to_finish = 0;

  tp->control.command_interp = NULL;
  tp->control.stepping_command = 0;

  /* Discard any remaining commands or status from previous stop.  */
  bpstat_clear (&tp->control.stop_bpstat);
}
2472
/* Clear the run-control state of every thread we are about to
   resume, plus the global stepping state.  STEP indicates whether
   the upcoming command is a stepping command (affects which threads
   user_visible_resume_ptid selects under scheduler-locking).  */
void
clear_proceed_status (int step)
{
  if (!non_stop)
    {
      struct thread_info *tp;
      ptid_t resume_ptid;

      resume_ptid = user_visible_resume_ptid (step);

      /* In all-stop mode, delete the per-thread status of all threads
	 we're about to resume, implicitly and explicitly.  */
      ALL_NON_EXITED_THREADS (tp)
        {
	  if (!ptid_match (tp->ptid, resume_ptid))
	    continue;
	  clear_proceed_status_thread (tp);
	}
    }

  if (!ptid_equal (inferior_ptid, null_ptid))
    {
      struct inferior *inferior;

      if (non_stop)
	{
	  /* If in non-stop mode, only delete the per-thread status of
	     the current thread.  */
	  clear_proceed_status_thread (inferior_thread ());
	}

      inferior = current_inferior ();
      inferior->control.stop_soon = NO_STOP_QUIETLY;
    }

  stop_after_trap = 0;

  clear_step_over_info ();

  observer_notify_about_to_proceed ();

  /* Discard the register snapshot saved at the previous stop.  */
  if (stop_registers)
    {
      regcache_xfree (stop_registers);
      stop_registers = NULL;
    }
}
2520
2521 /* Returns true if TP is still stopped at a breakpoint that needs
2522 stepping-over in order to make progress. If the breakpoint is gone
2523 meanwhile, we can skip the whole step-over dance. */
2524
2525 static int
2526 thread_still_needs_step_over (struct thread_info *tp)
2527 {
2528 if (tp->stepping_over_breakpoint)
2529 {
2530 struct regcache *regcache = get_thread_regcache (tp->ptid);
2531
2532 if (breakpoint_here_p (get_regcache_aspace (regcache),
2533 regcache_read_pc (regcache))
2534 == ordinary_breakpoint_here)
2535 return 1;
2536
2537 tp->stepping_over_breakpoint = 0;
2538 }
2539
2540 return 0;
2541 }
2542
2543 /* Returns true if scheduler locking applies. STEP indicates whether
2544 we're about to do a step/next-like command to a thread. */
2545
2546 static int
2547 schedlock_applies (struct thread_info *tp)
2548 {
2549 return (scheduler_mode == schedlock_on
2550 || (scheduler_mode == schedlock_step
2551 && tp->control.stepping_command));
2552 }
2553
/* Look for a thread other than EXCEPT that has previously reported a
   breakpoint event, and thus needs a step-over in order to make
   progress.  Returns NULL if none is found.  */

static struct thread_info *
find_thread_needs_step_over (struct thread_info *except)
{
  struct thread_info *tp, *current;

  /* With non-stop mode on, threads are always handled individually.  */
  gdb_assert (! non_stop);

  current = inferior_thread ();

  /* If scheduler locking applies, we can avoid iterating over all
     threads: only the current thread may be resumed anyway.  */
  if (schedlock_applies (except))
    {
      if (except != current
          && thread_still_needs_step_over (current))
	return current;

      return NULL;
    }

  ALL_NON_EXITED_THREADS (tp)
    {
      /* Ignore the EXCEPT thread.  */
      if (tp == except)
	continue;
      /* Ignore threads of processes we're not resuming.  */
      if (!sched_multi
	  && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
	continue;

      if (thread_still_needs_step_over (tp))
	return tp;
    }

  return NULL;
}
2595
/* Basic routine for continuing the program in various fashions.

   ADDR is the address to resume at, or -1 for resume where stopped.
   SIGGNAL is the signal to give it, or 0 for none,
   or -1 for act according to how it stopped.

   Note this function takes no STEP argument; stepping is requested
   by setting the various step_... variables in the thread's control
   state before calling here.

   You should call clear_proceed_status before calling proceed.  */
2607
void
proceed (CORE_ADDR addr, enum gdb_signal siggnal)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  struct thread_info *tp;
  CORE_ADDR pc;
  struct address_space *aspace;

  /* If we're stopped at a fork/vfork, follow the branch set by the
     "set follow-fork-mode" command; otherwise, we'll just proceed
     resuming the current thread.  */
  if (!follow_fork ())
    {
      /* The target for some reason decided not to resume.  */
      normal_stop ();
      if (target_can_async_p ())
	inferior_event_handler (INF_EXEC_COMPLETE, NULL);
      return;
    }

  /* We'll update this if & when we switch to a new thread.  */
  previous_inferior_ptid = inferior_ptid;

  regcache = get_current_regcache ();
  gdbarch = get_regcache_arch (regcache);
  aspace = get_regcache_aspace (regcache);
  pc = regcache_read_pc (regcache);
  tp = inferior_thread ();

  /* Fill in with reasonable starting values.  */
  init_thread_stepping_state (tp);

  if (addr == (CORE_ADDR) -1)
    {
      if (pc == stop_pc
	  && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
	  && execution_direction != EXEC_REVERSE)
	/* There is a breakpoint at the address we will resume at,
	   step one instruction before inserting breakpoints so that
	   we do not stop right away (and report a second hit at this
	   breakpoint).

	   Note, we don't do this in reverse, because we won't
	   actually be executing the breakpoint insn anyway.
	   We'll be (un-)executing the previous instruction.  */
	tp->stepping_over_breakpoint = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
	       && gdbarch_single_step_through_delay (gdbarch,
						     get_current_frame ()))
	/* We stepped onto an instruction that needs to be stepped
	   again before re-inserting the breakpoint, do so.  */
	tp->stepping_over_breakpoint = 1;
    }
  else
    {
      regcache_write_pc (regcache, addr);
    }

  if (siggnal != GDB_SIGNAL_DEFAULT)
    tp->suspend.stop_signal = siggnal;

  /* Record the interpreter that issued the execution command that
     caused this thread to resume.  If the top level interpreter is
     MI/async, and the execution command was a CLI command
     (next/step/etc.), we'll want to print stop event output to the MI
     console channel (the stepped-to line, etc.), as if the user
     entered the execution command on a real GDB console.  */
  inferior_thread ()->control.command_interp = command_interp ();

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"infrun: proceed (addr=%s, signal=%s)\n",
			paddress (gdbarch, addr),
			gdb_signal_to_symbol_string (siggnal));

  if (non_stop)
    /* In non-stop, each thread is handled individually.  The context
       must already be set to the right thread here.  */
    ;
  else
    {
      struct thread_info *step_over;

      /* In a multi-threaded task we may select another thread and
	 then continue or step.

	 But if the old thread was stopped at a breakpoint, it will
	 immediately cause another breakpoint stop without any
	 execution (i.e. it will report a breakpoint hit incorrectly).
	 So we must step over it first.

	 Look for a thread other than the current (TP) that reported a
	 breakpoint hit and hasn't been resumed yet since.  */
      step_over = find_thread_needs_step_over (tp);
      if (step_over != NULL)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: need to step-over [%s] first\n",
				target_pid_to_str (step_over->ptid));

	  /* Store the prev_pc for the stepping thread too, needed by
	     switch_back_to_stepped_thread.  */
	  tp->prev_pc = regcache_read_pc (get_current_regcache ());
	  switch_to_thread (step_over->ptid);
	  tp = step_over;
	}
    }

  /* If we need to step over a breakpoint, and we're not using
     displaced stepping to do so, insert all breakpoints (watchpoints,
     etc.) but the one we're stepping over, step one instruction, and
     then re-insert the breakpoint when that step is finished.  */
  if (tp->stepping_over_breakpoint && !use_displaced_stepping (gdbarch))
    {
      struct regcache *regcache = get_current_regcache ();

      set_step_over_info (get_regcache_aspace (regcache),
			  regcache_read_pc (regcache), 0);
    }
  else
    clear_step_over_info ();

  insert_breakpoints ();

  tp->control.trap_expected = tp->stepping_over_breakpoint;

  annotate_starting ();

  /* Make sure that output from GDB appears before output from the
     inferior.  */
  gdb_flush (gdb_stdout);

  /* Refresh prev_pc value just prior to resuming.  This used to be
     done in stop_waiting, however, setting prev_pc there did not handle
     scenarios such as inferior function calls or returning from
     a function via the return command.  In those cases, the prev_pc
     value was not set properly for subsequent commands.  The prev_pc value
     is used to initialize the starting line number in the ecs.  With an
     invalid value, the gdb next command ends up stopping at the position
     represented by the next line table entry past our start position.
     On platforms that generate one line table entry per line, this
     is not a problem.  However, on the ia64, the compiler generates
     extraneous line table entries that do not increase the line number.
     When we issue the gdb next command on the ia64 after an inferior call
     or a return command, we often end up a few instructions forward, still
     within the original line we started.

     An attempt was made to refresh the prev_pc at the same time the
     execution_control_state is initialized (for instance, just before
     waiting for an inferior event).  But this approach did not work
     because of platforms that use ptrace, where the pc register cannot
     be read unless the inferior is stopped.  At that point, we are not
     guaranteed the inferior is stopped and so the regcache_read_pc() call
     can fail.  Setting the prev_pc value here ensures the value is updated
     correctly when the inferior is stopped.  */
  tp->prev_pc = regcache_read_pc (get_current_regcache ());

  /* Resume inferior.  */
  resume (tp->suspend.stop_signal);

  /* Wait for it to stop (if not standalone)
     and in any case decode why it stopped, and act accordingly.  */
  /* Do this only if we are not using the event loop, or if the target
     does not support asynchronous execution.  */
  if (!target_can_async_p ())
    {
      wait_for_inferior ();
      normal_stop ();
    }
}
2780 \f
2781
2782 /* Start remote-debugging of a machine over a serial link. */
2783
2784 void
2785 start_remote (int from_tty)
2786 {
2787 struct inferior *inferior;
2788
2789 inferior = current_inferior ();
2790 inferior->control.stop_soon = STOP_QUIETLY_REMOTE;
2791
2792 /* Always go on waiting for the target, regardless of the mode. */
2793 /* FIXME: cagney/1999-09-23: At present it isn't possible to
2794 indicate to wait_for_inferior that a target should timeout if
2795 nothing is returned (instead of just blocking). Because of this,
2796 targets expecting an immediate response need to, internally, set
2797 things up so that the target_wait() is forced to eventually
2798 timeout. */
2799 /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2800 differentiate to its caller what the state of the target is after
2801 the initial open has been performed. Here we're assuming that
2802 the target has stopped. It should be possible to eventually have
2803 target_open() return to the caller an indication that the target
2804 is currently running and GDB state should be set to the same as
2805 for an async run. */
2806 wait_for_inferior ();
2807
2808 /* Now that the inferior has stopped, do any bookkeeping like
2809 loading shared libraries. We want to do this before normal_stop,
2810 so that the displayed frame is up to date. */
2811 post_create_inferior (&current_target, from_tty);
2812
2813 normal_stop ();
2814 }
2815
2816 /* Initialize static vars when a new inferior begins. */
2817
2818 void
2819 init_wait_for_inferior (void)
2820 {
2821 /* These are meaningless until the first time through wait_for_inferior. */
2822
2823 breakpoint_init_inferior (inf_starting);
2824
2825 clear_proceed_status (0);
2826
2827 target_last_wait_ptid = minus_one_ptid;
2828
2829 previous_inferior_ptid = inferior_ptid;
2830
2831 /* Discard any skipped inlined frames. */
2832 clear_inline_frame_state (minus_one_ptid);
2833 }
2834
2835 \f
/* Data to be passed around while handling an event.  This data is
   discarded between events.  */
struct execution_control_state
{
  /* The ptid the event was reported for.  */
  ptid_t ptid;
  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  /* The waitstatus describing the event.  */
  struct target_waitstatus ws;
  /* Nonzero once the stop_func_* fields below have been lazily
     filled in (see fill_in_stop_func).  */
  int stop_func_filled_in;
  /* Bounds and name of the function the inferior stopped in; 0/NULL
     if they could not be determined.  */
  CORE_ADDR stop_func_start;
  CORE_ADDR stop_func_end;
  const char *stop_func_name;
  /* Nonzero if the event loop should keep waiting for further events
     instead of stopping.  */
  int wait_some_more;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint;
};
2858
2859 static void handle_inferior_event (struct execution_control_state *ecs);
2860
2861 static void handle_step_into_function (struct gdbarch *gdbarch,
2862 struct execution_control_state *ecs);
2863 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2864 struct execution_control_state *ecs);
2865 static void handle_signal_stop (struct execution_control_state *ecs);
2866 static void check_exception_resume (struct execution_control_state *,
2867 struct frame_info *);
2868
2869 static void end_stepping_range (struct execution_control_state *ecs);
2870 static void stop_waiting (struct execution_control_state *ecs);
2871 static void prepare_to_wait (struct execution_control_state *ecs);
2872 static void keep_going (struct execution_control_state *ecs);
2873 static void process_event_stop_test (struct execution_control_state *ecs);
2874 static int switch_back_to_stepped_thread (struct execution_control_state *ecs);
2875
/* Callback for iterate over threads.  If the thread is stopped, but
   the user/frontend doesn't know about that yet, go through
   normal_stop, as if the thread had just stopped now.  ARG points at
   a ptid.  If PTID is MINUS_ONE_PTID, applies to all threads.  If
   ptid_is_pid(PTID) is true, applies to all threads of the process
   pointed at by PTID.  Otherwise, apply only to the thread pointed by
   PTID.  */

static int
infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
{
  ptid_t ptid = * (ptid_t *) arg;

  /* Only act on threads matching PTID that GDB still considers
     running but that are no longer executing on the target side.  */
  if ((ptid_equal (info->ptid, ptid)
       || ptid_equal (minus_one_ptid, ptid)
       || (ptid_is_pid (ptid)
	   && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
      && is_running (info->ptid)
      && !is_executing (info->ptid))
    {
      struct cleanup *old_chain;
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;

      memset (ecs, 0, sizeof (*ecs));

      old_chain = make_cleanup_restore_current_thread ();

      overlay_cache_invalid = 1;
      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      /* Go through handle_inferior_event/normal_stop, so we always
	 have consistent output as if the stop event had been
	 reported.  Synthesize a "stopped with signal 0" event for
	 this thread.  */
      ecs->ptid = info->ptid;
      ecs->event_thread = find_thread_ptid (info->ptid);
      ecs->ws.kind = TARGET_WAITKIND_STOPPED;
      ecs->ws.value.sig = GDB_SIGNAL_0;

      handle_inferior_event (ecs);

      if (!ecs->wait_some_more)
	{
	  struct thread_info *tp;

	  normal_stop ();

	  /* Finish off the continuations.  */
	  tp = inferior_thread ();
	  do_all_intermediate_continuations_thread (tp, 1);
	  do_all_continuations_thread (tp, 1);
	}

      do_cleanups (old_chain);
    }

  /* Always return 0 so iterate_over_threads visits every thread.  */
  return 0;
}
2938
2939 /* This function is attached as a "thread_stop_requested" observer.
2940 Cleanup local state that assumed the PTID was to be resumed, and
2941 report the stop to the frontend. */
2942
2943 static void
2944 infrun_thread_stop_requested (ptid_t ptid)
2945 {
2946 struct displaced_step_inferior_state *displaced;
2947
2948 /* PTID was requested to stop. Remove it from the displaced
2949 stepping queue, so we don't try to resume it automatically. */
2950
2951 for (displaced = displaced_step_inferior_states;
2952 displaced;
2953 displaced = displaced->next)
2954 {
2955 struct displaced_step_request *it, **prev_next_p;
2956
2957 it = displaced->step_request_queue;
2958 prev_next_p = &displaced->step_request_queue;
2959 while (it)
2960 {
2961 if (ptid_match (it->ptid, ptid))
2962 {
2963 *prev_next_p = it->next;
2964 it->next = NULL;
2965 xfree (it);
2966 }
2967 else
2968 {
2969 prev_next_p = &it->next;
2970 }
2971
2972 it = *prev_next_p;
2973 }
2974 }
2975
2976 iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2977 }
2978
2979 static void
2980 infrun_thread_thread_exit (struct thread_info *tp, int silent)
2981 {
2982 if (ptid_equal (target_last_wait_ptid, tp->ptid))
2983 nullify_last_target_wait_ptid ();
2984 }
2985
/* Delete the step resume, single-step and longjmp/exception resume
   breakpoints of TP.  */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  /* Remove every infrun-owned breakpoint associated with TP.  */
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
  delete_single_step_breakpoints (tp);
}
2996
2997 /* If the target still has execution, call FUNC for each thread that
2998 just stopped. In all-stop, that's all the non-exited threads; in
2999 non-stop, that's the current thread, only. */
3000
3001 typedef void (*for_each_just_stopped_thread_callback_func)
3002 (struct thread_info *tp);
3003
3004 static void
3005 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3006 {
3007 if (!target_has_execution || ptid_equal (inferior_ptid, null_ptid))
3008 return;
3009
3010 if (non_stop)
3011 {
3012 /* If in non-stop mode, only the current thread stopped. */
3013 func (inferior_thread ());
3014 }
3015 else
3016 {
3017 struct thread_info *tp;
3018
3019 /* In all-stop mode, all threads have stopped. */
3020 ALL_NON_EXITED_THREADS (tp)
3021 {
3022 func (tp);
3023 }
3024 }
3025 }
3026
3027 /* Delete the step resume and longjmp/exception resume breakpoints of
3028 the threads that just stopped. */
3029
3030 static void
3031 delete_just_stopped_threads_infrun_breakpoints (void)
3032 {
3033 for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
3034 }
3035
3036 /* Delete the single-step breakpoints of the threads that just
3037 stopped. */
3038
3039 static void
3040 delete_just_stopped_threads_single_step_breakpoints (void)
3041 {
3042 for_each_just_stopped_thread (delete_single_step_breakpoints);
3043 }
3044
/* Cleanup-compatible wrapper (ARG is unused) around
   delete_just_stopped_threads_infrun_breakpoints.  */

static void
delete_just_stopped_threads_infrun_breakpoints_cleanup (void *arg)
{
  delete_just_stopped_threads_infrun_breakpoints ();
}
3052
3053 /* Pretty print the results of target_wait, for debugging purposes. */
3054
3055 static void
3056 print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
3057 const struct target_waitstatus *ws)
3058 {
3059 char *status_string = target_waitstatus_to_string (ws);
3060 struct ui_file *tmp_stream = mem_fileopen ();
3061 char *text;
3062
3063 /* The text is split over several lines because it was getting too long.
3064 Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
3065 output as a unit; we want only one timestamp printed if debug_timestamp
3066 is set. */
3067
3068 fprintf_unfiltered (tmp_stream,
3069 "infrun: target_wait (%d.%ld.%ld",
3070 ptid_get_pid (waiton_ptid),
3071 ptid_get_lwp (waiton_ptid),
3072 ptid_get_tid (waiton_ptid));
3073 if (ptid_get_pid (waiton_ptid) != -1)
3074 fprintf_unfiltered (tmp_stream,
3075 " [%s]", target_pid_to_str (waiton_ptid));
3076 fprintf_unfiltered (tmp_stream, ", status) =\n");
3077 fprintf_unfiltered (tmp_stream,
3078 "infrun: %d.%ld.%ld [%s],\n",
3079 ptid_get_pid (result_ptid),
3080 ptid_get_lwp (result_ptid),
3081 ptid_get_tid (result_ptid),
3082 target_pid_to_str (result_ptid));
3083 fprintf_unfiltered (tmp_stream,
3084 "infrun: %s\n",
3085 status_string);
3086
3087 text = ui_file_xstrdup (tmp_stream, NULL);
3088
3089 /* This uses %s in part to handle %'s in the text, but also to avoid
3090 a gcc error: the format attribute requires a string literal. */
3091 fprintf_unfiltered (gdb_stdlog, "%s", text);
3092
3093 xfree (status_string);
3094 xfree (text);
3095 ui_file_delete (tmp_stream);
3096 }
3097
/* Prepare and stabilize the inferior for detaching it.  E.g.,
   detaching while a thread is displaced stepping is a recipe for
   crashing it, as nothing would readjust the PC out of the scratch
   pad.  */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  ptid_t pid_ptid = pid_to_ptid (inf->pid);
  struct cleanup *old_chain_1;
  struct displaced_step_inferior_state *displaced;

  displaced = get_displaced_stepping_state (inf->pid);

  /* Is any thread of this process displaced stepping?  If not,
     there's nothing else to do.  */
  if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
    return;

  if (debug_infrun)
    fprintf_unfiltered (gdb_stdlog,
			"displaced-stepping in-process while detaching");

  /* Flag that we're detaching; the cleanup restores the previous
     value should an error be thrown below.  */
  old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
  inf->detaching = 1;

  /* Pump events until no thread of this process is displaced stepping
     anymore.  */
  while (!ptid_equal (displaced->step_ptid, null_ptid))
    {
      struct cleanup *old_chain_2;
      struct execution_control_state ecss;
      struct execution_control_state *ecs;

      ecs = &ecss;
      memset (ecs, 0, sizeof (*ecs));

      overlay_cache_invalid = 1;
      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      if (deprecated_target_wait_hook)
	ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
      else
	ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);

      if (debug_infrun)
	print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);

      /* If an error happens while handling the event, propagate GDB's
	 knowledge of the executing state to the frontend/user running
	 state.  */
      old_chain_2 = make_cleanup (finish_thread_state_cleanup,
				  &minus_one_ptid);

      /* Now figure out what to do with the result of the result.  */
      handle_inferior_event (ecs);

      /* No error, don't finish the state yet.  */
      discard_cleanups (old_chain_2);

      /* Breakpoints and watchpoints are not installed on the target
	 at this point, and signals are passed directly to the
	 inferior, so this must mean the process is gone.  */
      if (!ecs->wait_some_more)
	{
	  discard_cleanups (old_chain_1);
	  error (_("Program exited while detaching"));
	}
    }

  discard_cleanups (old_chain_1);
}
3173
/* Wait for control to return from inferior to debugger.

   If inferior gets a signal, we may decide to start it up again
   instead of returning.  That is why there is a loop in this function.
   When this function actually returns it means the inferior
   should be left stopped and GDB should read more commands.  */

void
wait_for_inferior (void)
{
  struct cleanup *old_cleanups;
  struct cleanup *thread_state_chain;

  if (debug_infrun)
    fprintf_unfiltered
      (gdb_stdlog, "infrun: wait_for_inferior ()\n");

  /* On any exit path, delete the infrun breakpoints left over on the
     threads that just stopped.  */
  old_cleanups
    = make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup,
		    NULL);

  /* If an error happens while handling the event, propagate GDB's
     knowledge of the executing state to the frontend/user running
     state.  */
  thread_state_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);

  /* Keep pumping events until handle_inferior_event decides to
     stop.  */
  while (1)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;
      ptid_t waiton_ptid = minus_one_ptid;

      memset (ecs, 0, sizeof (*ecs));

      overlay_cache_invalid = 1;

      /* Flush target cache before starting to handle each event.
	 Target was running and cache could be stale.  This is just a
	 heuristic.  Running threads may modify target memory, but we
	 don't get any event.  */
      target_dcache_invalidate ();

      if (deprecated_target_wait_hook)
	ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
      else
	ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);

      if (debug_infrun)
	print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);

      /* Now figure out what to do with the result of the result.  */
      handle_inferior_event (ecs);

      if (!ecs->wait_some_more)
	break;
    }

  /* No error, don't finish the state yet.  */
  discard_cleanups (thread_state_chain);

  do_cleanups (old_cleanups);
}
3236
3237 /* Cleanup that reinstalls the readline callback handler, if the
3238 target is running in the background. If while handling the target
3239 event something triggered a secondary prompt, like e.g., a
3240 pagination prompt, we'll have removed the callback handler (see
3241 gdb_readline_wrapper_line). Need to do this as we go back to the
3242 event loop, ready to process further input. Note this has no
3243 effect if the handler hasn't actually been removed, because calling
3244 rl_callback_handler_install resets the line buffer, thus losing
3245 input. */
3246
3247 static void
3248 reinstall_readline_callback_handler_cleanup (void *arg)
3249 {
3250 if (!interpreter_async)
3251 {
3252 /* We're not going back to the top level event loop yet. Don't
3253 install the readline callback, as it'd prep the terminal,
3254 readline-style (raw, noecho) (e.g., --batch). We'll install
3255 it the next time the prompt is displayed, when we're ready
3256 for input. */
3257 return;
3258 }
3259
3260 if (async_command_editing_p && !sync_execution)
3261 gdb_rl_callback_handler_reinstall ();
3262 }
3263
/* Asynchronous version of wait_for_inferior.  It is called by the
   event loop whenever a change of state is detected on the file
   descriptor corresponding to the target.  It can be called more than
   once to complete a single execution command.  In such cases we need
   to keep the state in a global variable ECSS.  If it is the last time
   that this function is called for a single execution command, then
   report to the user that the inferior has stopped, and do the
   necessary cleanups.  */

void
fetch_inferior_event (void *client_data)
{
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
  struct cleanup *ts_old_chain;
  int was_sync = sync_execution;
  int cmd_done = 0;
  ptid_t waiton_ptid = minus_one_ptid;

  memset (ecs, 0, sizeof (*ecs));

  /* End up with readline processing input, if necessary.  */
  make_cleanup (reinstall_readline_callback_handler_cleanup, NULL);

  /* We're handling a live event, so make sure we're doing live
     debugging.  If we're looking at traceframes while the target is
     running, we're going to need to get back to that mode after
     handling the event.  */
  if (non_stop)
    {
      make_cleanup_restore_current_traceframe ();
      set_current_traceframe (-1);
    }

  if (non_stop)
    /* In non-stop mode, the user/frontend should not notice a thread
       switch due to internal events.  Make sure we reverse to the
       user selected thread and frame after handling the event and
       running any breakpoint commands.  */
    make_cleanup_restore_current_thread ();

  overlay_cache_invalid = 1;
  /* Flush target cache before starting to handle each event.  Target
     was running and cache could be stale.  This is just a heuristic.
     Running threads may modify target memory, but we don't get any
     event.  */
  target_dcache_invalidate ();

  /* Restore the execution direction afterwards; it may differ while
     replaying, per target_execution_direction.  */
  make_cleanup_restore_integer (&execution_direction);
  execution_direction = target_execution_direction ();

  /* Poll for an event (TARGET_WNOHANG: don't block the event
     loop).  */
  if (deprecated_target_wait_hook)
    ecs->ptid =
      deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
  else
    ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);

  if (debug_infrun)
    print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);

  /* If an error happens while handling the event, propagate GDB's
     knowledge of the executing state to the frontend/user running
     state.  In non-stop mode only the event thread is affected.  */
  if (!non_stop)
    ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
  else
    ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);

  /* Get executed before make_cleanup_restore_current_thread above to apply
     still for the thread which has thrown the exception.  */
  make_bpstat_clear_actions_cleanup ();

  make_cleanup (delete_just_stopped_threads_infrun_breakpoints_cleanup, NULL);

  /* Now figure out what to do with the result of the result.  */
  handle_inferior_event (ecs);

  if (!ecs->wait_some_more)
    {
      struct inferior *inf = find_inferior_ptid (ecs->ptid);

      delete_just_stopped_threads_infrun_breakpoints ();

      /* We may not find an inferior if this was a process exit.  */
      if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
	normal_stop ();

      /* If a multi-step command (e.g. "next N") finished one step but
	 has more to go, keep the command going; otherwise the command
	 is complete.  */
      if (target_has_execution
	  && ecs->ws.kind != TARGET_WAITKIND_NO_RESUMED
	  && ecs->ws.kind != TARGET_WAITKIND_EXITED
	  && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
	  && ecs->event_thread->step_multi
	  && ecs->event_thread->control.stop_step)
	inferior_event_handler (INF_EXEC_CONTINUE, NULL);
      else
	{
	  inferior_event_handler (INF_EXEC_COMPLETE, NULL);
	  cmd_done = 1;
	}
    }

  /* No error, don't finish the thread states yet.  */
  discard_cleanups (ts_old_chain);

  /* Revert thread and frame.  */
  do_cleanups (old_chain);

  /* If the inferior was in sync execution mode, and now isn't,
     restore the prompt (a synchronous execution command has finished,
     and we're ready for input).  */
  if (interpreter_async && was_sync && !sync_execution)
    observer_notify_sync_execution_done ();

  /* Announce "completed." for finished background commands when the
     user enabled that notification (exec_done_display_p) and nothing
     is running anymore.  */
  if (cmd_done
      && !was_sync
      && exec_done_display_p
      && (ptid_equal (inferior_ptid, null_ptid)
	  || !is_running (inferior_ptid)))
    printf_unfiltered (_("completed.\n"));
}
3385
3386 /* Record the frame and location we're currently stepping through. */
3387 void
3388 set_step_info (struct frame_info *frame, struct symtab_and_line sal)
3389 {
3390 struct thread_info *tp = inferior_thread ();
3391
3392 tp->control.step_frame_id = get_frame_id (frame);
3393 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
3394
3395 tp->current_symtab = sal.symtab;
3396 tp->current_line = sal.line;
3397 }
3398
3399 /* Clear context switchable stepping state. */
3400
3401 void
3402 init_thread_stepping_state (struct thread_info *tss)
3403 {
3404 tss->stepped_breakpoint = 0;
3405 tss->stepping_over_breakpoint = 0;
3406 tss->stepping_over_watchpoint = 0;
3407 tss->step_after_step_resume_breakpoint = 0;
3408 }
3409
3410 /* Set the cached copy of the last ptid/waitstatus. */
3411
3412 static void
3413 set_last_target_status (ptid_t ptid, struct target_waitstatus status)
3414 {
3415 target_last_wait_ptid = ptid;
3416 target_last_waitstatus = status;
3417 }
3418
3419 /* Return the cached copy of the last pid/waitstatus returned by
3420 target_wait()/deprecated_target_wait_hook(). The data is actually
3421 cached by handle_inferior_event(), which gets called immediately
3422 after target_wait()/deprecated_target_wait_hook(). */
3423
3424 void
3425 get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
3426 {
3427 *ptidp = target_last_wait_ptid;
3428 *status = target_last_waitstatus;
3429 }
3430
3431 void
3432 nullify_last_target_wait_ptid (void)
3433 {
3434 target_last_wait_ptid = minus_one_ptid;
3435 }
3436
/* Switch thread contexts.  Makes PTID the current thread, logging
   the switch when infrun debugging is enabled.  */

static void
context_switch (ptid_t ptid)
{
  if (debug_infrun && !ptid_equal (ptid, inferior_ptid))
    {
      /* NOTE(review): two separate fprintf calls here, presumably
	 because target_pid_to_str returns a static buffer that a
	 second call in the same argument list would overwrite --
	 confirm before merging these into one call.  */
      fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
			  target_pid_to_str (inferior_ptid));
      fprintf_unfiltered (gdb_stdlog, "to %s\n",
			  target_pid_to_str (ptid));
    }

  switch_to_thread (ptid);
}
3452
/* If the reported stop looks like a software-breakpoint SIGTRAP on a
   target whose gdbarch_decr_pc_after_break is nonzero, rewind the
   PC back to the breakpoint address.  */

static void
adjust_pc_after_break (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  struct address_space *aspace;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP.  If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break.  Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards.  It may not match
     gdbarch_decr_pc_after_break.  I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break.  I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here.  */

  if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
    return;

  if (ecs->ws.value.sig != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed.  The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong.  E.g., consider this case on a decr_pc_after_break == 1
     architecture:

     B1         0x08000000 :   INSN1
     B2         0x08000001 :   INSN2
		0x08000002 :   INSN3
     PC ->      0x08000003 :   INSN4

     Say you're stopped at 0x08000003 as above.  Reverse continuing
     from that point should hit B2 as below.  Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

     B1         0x08000000 :   INSN1
     B2   PC -> 0x08000001 :   INSN2
		0x08000002 :   INSN3
		0x08000003 :   INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1.  We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet.  Doing nothing is the correct
     behaviour.  */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If the target can tell whether the thread hit a SW breakpoint,
     trust it.  Targets that can tell also adjust the PC
     themselves.  */
  if (target_supports_stopped_by_sw_breakpoint ())
    return;

  /* Note that relying on whether a breakpoint is planted in memory to
     determine this can fail.  E.g,. the breakpoint could have been
     removed since.  Or the thread could have been told to step an
     instruction the size of a breakpoint instruction, and only
     _after_ was a breakpoint inserted at its address.  */

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do.  */
  regcache = get_thread_regcache (ecs->ptid);
  gdbarch = get_regcache_arch (regcache);

  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  aspace = get_regcache_aspace (regcache);

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be.  */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* If the target can't tell whether a software breakpoint triggered,
     fallback to figuring it out based on breakpoints we think were
     inserted in the target, and on whether the thread was stepped or
     continued.  */

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later.  To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported.  Note
     this is an heuristic and can thus get confused.  The real fix is
     to get the "stopped by SW BP and needs adjustment" info out of
     the target/kernel (and thus never reach here; see above).  */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);

      /* Keep the PC write out of the recorded execution log.  */
      if (record_full_is_used ())
	record_full_gdb_operation_disable_set ();

      /* When using hardware single-step, a SIGTRAP is reported for both
	 a completed single-step and a software breakpoint.  Need to
	 differentiate between the two, as the latter needs adjusting
	 but the former does not.

	 The SIGTRAP can be due to a completed hardware single-step only if
	  - we didn't insert software single-step breakpoints
	  - this thread is currently being stepped

	 If any of these events did not occur, we must have stopped due
	 to hitting a software breakpoint, and have to back up to the
	 breakpoint address.

	 As a special case, we could have hardware single-stepped a
	 software breakpoint.  In this case (prev_pc == breakpoint_pc),
	 we also need to back up to the breakpoint address.  */

      if (thread_has_single_step_breakpoints_set (ecs->event_thread)
	  || !currently_stepping (ecs->event_thread)
	  || (ecs->event_thread->stepped_breakpoint
	      && ecs->event_thread->prev_pc == breakpoint_pc))
	regcache_write_pc (regcache, breakpoint_pc);

      do_cleanups (old_cleanups);
    }
}
3594
3595 static int
3596 stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
3597 {
3598 for (frame = get_prev_frame (frame);
3599 frame != NULL;
3600 frame = get_prev_frame (frame))
3601 {
3602 if (frame_id_eq (get_frame_id (frame), step_frame_id))
3603 return 1;
3604 if (get_frame_type (frame) != INLINE_FRAME)
3605 break;
3606 }
3607
3608 return 0;
3609 }
3610
3611 /* Auxiliary function that handles syscall entry/return events.
3612 It returns 1 if the inferior should keep going (and GDB
3613 should ignore the event), or 0 if the event deserves to be
3614 processed. */
3615
3616 static int
3617 handle_syscall_event (struct execution_control_state *ecs)
3618 {
3619 struct regcache *regcache;
3620 int syscall_number;
3621
3622 if (!ptid_equal (ecs->ptid, inferior_ptid))
3623 context_switch (ecs->ptid);
3624
3625 regcache = get_thread_regcache (ecs->ptid);
3626 syscall_number = ecs->ws.value.syscall_number;
3627 stop_pc = regcache_read_pc (regcache);
3628
3629 if (catch_syscall_enabled () > 0
3630 && catching_syscall_number (syscall_number) > 0)
3631 {
3632 if (debug_infrun)
3633 fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
3634 syscall_number);
3635
3636 ecs->event_thread->control.stop_bpstat
3637 = bpstat_stop_status (get_regcache_aspace (regcache),
3638 stop_pc, ecs->ptid, &ecs->ws);
3639
3640 if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
3641 {
3642 /* Catchpoint hit. */
3643 return 0;
3644 }
3645 }
3646
3647 /* If no catchpoint triggered for this, then keep going. */
3648 keep_going (ecs);
3649 return 1;
3650 }
3651
3652 /* Lazily fill in the execution_control_state's stop_func_* fields. */
3653
3654 static void
3655 fill_in_stop_func (struct gdbarch *gdbarch,
3656 struct execution_control_state *ecs)
3657 {
3658 if (!ecs->stop_func_filled_in)
3659 {
3660 /* Don't care about return value; stop_func_start and stop_func_name
3661 will both be 0 if it doesn't work. */
3662 find_pc_partial_function (stop_pc, &ecs->stop_func_name,
3663 &ecs->stop_func_start, &ecs->stop_func_end);
3664 ecs->stop_func_start
3665 += gdbarch_deprecated_function_start_offset (gdbarch);
3666
3667 if (gdbarch_skip_entrypoint_p (gdbarch))
3668 ecs->stop_func_start = gdbarch_skip_entrypoint (gdbarch,
3669 ecs->stop_func_start);
3670
3671 ecs->stop_func_filled_in = 1;
3672 }
3673 }
3674
3675
3676 /* Return the STOP_SOON field of the inferior pointed at by PTID. */
3677
3678 static enum stop_kind
3679 get_inferior_stop_soon (ptid_t ptid)
3680 {
3681 struct inferior *inf = find_inferior_ptid (ptid);
3682
3683 gdb_assert (inf != NULL);
3684 return inf->control.stop_soon;
3685 }
3686
3687 /* Given an execution control state that has been freshly filled in by
3688 an event from the inferior, figure out what it means and take
3689 appropriate action.
3690
3691 The alternatives are:
3692
3693 1) stop_waiting and return; to really stop and return to the
3694 debugger.
3695
3696 2) keep_going and return; to wait for the next event (set
3697 ecs->event_thread->stepping_over_breakpoint to 1 to single step
3698 once). */
3699
static void
handle_inferior_event (struct execution_control_state *ecs)
{
  enum stop_kind stop_soon;

  if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
    {
      /* We had an event in the inferior, but we are not interested in
	 handling it at this level.  The lower layers have already
	 done what needs to be done, if anything.

	 One of the possible circumstances for this is when the
	 inferior produces output for the console.  The inferior has
	 not stopped, and we are ignoring the event.  Another possible
	 circumstance is any event which the lower level knows will be
	 reported multiple times without an intervening resume.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
      prepare_to_wait (ecs);
      return;
    }

  if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED
      && target_can_async_p () && !sync_execution)
    {
      /* There were no unwaited-for children left in the target, but,
	 we're not synchronously waiting for events either.  Just
	 ignore.  Otherwise, if we were running a synchronous
	 execution command, we need to cancel it and give the user
	 back the terminal.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: TARGET_WAITKIND_NO_RESUMED (ignoring)\n");
      prepare_to_wait (ecs);
      return;
    }

  /* Cache the last pid/waitstatus.  */
  set_last_target_status (ecs->ptid, ecs->ws);

  /* Always clear state belonging to the previous time we stopped.  */
  stop_stack_dummy = STOP_NONE;

  if (ecs->ws.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      /* No unwaited-for children left.  IOW, all resumed children
	 have exited.  */
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_RESUMED\n");

      stop_print_frame = 0;
      stop_waiting (ecs);
      return;
    }

  /* For process-exit events there is no event thread to look up; for
     everything else, resolve (or create) the thread that reported the
     event.  */
  if (ecs->ws.kind != TARGET_WAITKIND_EXITED
      && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
    {
      ecs->event_thread = find_thread_ptid (ecs->ptid);
      /* If it's a new thread, add it to the thread database.  */
      if (ecs->event_thread == NULL)
	ecs->event_thread = add_thread (ecs->ptid);

      /* Disable range stepping.  If the next step request could use a
	 range, this will be end up re-enabled then.  */
      ecs->event_thread->control.may_range_step = 0;
    }

  /* Dependent on valid ECS->EVENT_THREAD.  */
  adjust_pc_after_break (ecs);

  /* Dependent on the current PC value modified by adjust_pc_after_break.  */
  reinit_frame_cache ();

  breakpoint_retire_moribund ();

  /* First, distinguish signals caused by the debugger from signals
     that have to do with the program's own actions.  Note that
     breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
     on the operating system version.  Here we detect when a SIGILL or
     SIGEMT is really a breakpoint and change it to SIGTRAP.  We do
     something similar for SIGSEGV, since a SIGSEGV will be generated
     when we're trying to execute a breakpoint instruction on a
     non-executable stack.  This happens for call dummy breakpoints
     for architectures like SPARC that place call dummies on the
     stack.  */
  if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
      && (ecs->ws.value.sig == GDB_SIGNAL_ILL
	  || ecs->ws.value.sig == GDB_SIGNAL_SEGV
	  || ecs->ws.value.sig == GDB_SIGNAL_EMT))
    {
      struct regcache *regcache = get_thread_regcache (ecs->ptid);

      if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
				      regcache_read_pc (regcache)))
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: Treating signal as SIGTRAP\n");
	  ecs->ws.value.sig = GDB_SIGNAL_TRAP;
	}
    }

  /* Mark the non-executing threads accordingly.  In all-stop, all
     threads of all processes are stopped when we get any event
     reported.  In non-stop mode, only the event thread stops.  If
     we're handling a process exit in non-stop mode, there's nothing
     to do, as threads of the dead process are gone, and threads of
     any other process were left running.  */
  if (!non_stop)
    set_executing (minus_one_ptid, 0);
  else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
	   && ecs->ws.kind != TARGET_WAITKIND_EXITED)
    set_executing (ecs->ptid, 0);

  /* Dispatch on the kind of event reported.  Every case below either
     stops (stop_waiting) or keeps the inferior going (keep_going,
     prepare_to_wait, resume, or via process_event_stop_test) before
     returning.  */
  switch (ecs->ws.kind)
    {
    case TARGET_WAITKIND_LOADED:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);
      /* Ignore gracefully during startup of the inferior, as it might
	 be the shell which has just loaded some objects, otherwise
	 add the symbols for the newly loaded objects.  Also ignore at
	 the beginning of an attach or remote session; we will query
	 the full list of libraries once the connection is
	 established.  */

      stop_soon = get_inferior_stop_soon (ecs->ptid);
      if (stop_soon == NO_STOP_QUIETLY)
	{
	  struct regcache *regcache;

	  regcache = get_thread_regcache (ecs->ptid);

	  handle_solib_event ();

	  ecs->event_thread->control.stop_bpstat
	    = bpstat_stop_status (get_regcache_aspace (regcache),
				  stop_pc, ecs->ptid, &ecs->ws);

	  if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	    {
	      /* A catchpoint triggered.  */
	      process_event_stop_test (ecs);
	      return;
	    }

	  /* If requested, stop when the dynamic linker notifies
	     gdb of events.  This allows the user to get control
	     and place breakpoints in initializer routines for
	     dynamically loaded objects (among other things).  */
	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
	  if (stop_on_solib_events)
	    {
	      /* Make sure we print "Stopped due to solib-event" in
		 normal_stop.  */
	      stop_print_frame = 1;

	      stop_waiting (ecs);
	      return;
	    }
	}

      /* If we are skipping through a shell, or through shared library
	 loading that we aren't interested in, resume the program.  If
	 we're running the program normally, also resume.  */
      if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
	{
	  /* Loading of shared libraries might have changed breakpoint
	     addresses.  Make sure new breakpoints are inserted.  */
	  if (stop_soon == NO_STOP_QUIETLY)
	    insert_breakpoints ();
	  resume (GDB_SIGNAL_0);
	  prepare_to_wait (ecs);
	  return;
	}

      /* But stop if we're attaching or setting up a remote
	 connection.  */
      if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
	  || stop_soon == STOP_QUIETLY_REMOTE)
	{
	  if (debug_infrun)
	    fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
	  stop_waiting (ecs);
	  return;
	}

      internal_error (__FILE__, __LINE__,
		      _("unhandled stop_soon: %d"), (int) stop_soon);

      /* NOTE(review): internal_error presumably does not return, so
	 control never falls through into the SPURIOUS case below --
	 confirm against utils.c.  */
    case TARGET_WAITKIND_SPURIOUS:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);
      resume (GDB_SIGNAL_0);
      prepare_to_wait (ecs);
      return;

    case TARGET_WAITKIND_EXITED:
    case TARGET_WAITKIND_SIGNALLED:
      if (debug_infrun)
	{
	  if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: TARGET_WAITKIND_EXITED\n");
	  else
	    fprintf_unfiltered (gdb_stdlog,
				"infrun: TARGET_WAITKIND_SIGNALLED\n");
	}

      /* Make the exiting inferior current before mourning it.  */
      inferior_ptid = ecs->ptid;
      set_current_inferior (find_inferior_ptid (ecs->ptid));
      set_current_program_space (current_inferior ()->pspace);
      handle_vfork_child_exec_or_exit (0);
      target_terminal_ours ();	/* Must do this before mourn anyway.  */

      /* Clearing any previous state of convenience variables.  */
      clear_exit_convenience_vars ();

      if (ecs->ws.kind == TARGET_WAITKIND_EXITED)
	{
	  /* Record the exit code in the convenience variable $_exitcode, so
	     that the user can inspect this again later.  */
	  set_internalvar_integer (lookup_internalvar ("_exitcode"),
				   (LONGEST) ecs->ws.value.integer);

	  /* Also record this in the inferior itself.  */
	  current_inferior ()->has_exit_code = 1;
	  current_inferior ()->exit_code = (LONGEST) ecs->ws.value.integer;

	  /* Support the --return-child-result option.  */
	  return_child_result_value = ecs->ws.value.integer;

	  observer_notify_exited (ecs->ws.value.integer);
	}
      else
	{
	  struct regcache *regcache = get_thread_regcache (ecs->ptid);
	  struct gdbarch *gdbarch = get_regcache_arch (regcache);

	  if (gdbarch_gdb_signal_to_target_p (gdbarch))
	    {
	      /* Set the value of the internal variable $_exitsignal,
		 which holds the signal uncaught by the inferior.  */
	      set_internalvar_integer (lookup_internalvar ("_exitsignal"),
				       gdbarch_gdb_signal_to_target (gdbarch,
							  ecs->ws.value.sig));
	    }
	  else
	    {
	      /* We don't have access to the target's method used for
		 converting between signal numbers (GDB's internal
		 representation <-> target's representation).
		 Therefore, we cannot do a good job at displaying this
		 information to the user.  It's better to just warn
		 her about it (if infrun debugging is enabled), and
		 give up.  */
	      if (debug_infrun)
		fprintf_filtered (gdb_stdlog, _("\
Cannot fill $_exitsignal with the correct signal number.\n"));
	    }

	  observer_notify_signal_exited (ecs->ws.value.sig);
	}

      gdb_flush (gdb_stdout);
      target_mourn_inferior ();
      stop_print_frame = 0;
      stop_waiting (ecs);
      return;

      /* The following are the only cases in which we keep going;
	 the above cases end in a continue or goto.  */
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      if (debug_infrun)
	{
	  if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
	    fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");
	  else
	    fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORKED\n");
	}

      /* Check whether the inferior is displaced stepping.  */
      {
	struct regcache *regcache = get_thread_regcache (ecs->ptid);
	struct gdbarch *gdbarch = get_regcache_arch (regcache);
	struct displaced_step_inferior_state *displaced
	  = get_displaced_stepping_state (ptid_get_pid (ecs->ptid));

	/* If checking displaced stepping is supported, and thread
	   ecs->ptid is displaced stepping.  */
	if (displaced && ptid_equal (displaced->step_ptid, ecs->ptid))
	  {
	    struct inferior *parent_inf
	      = find_inferior_ptid (ecs->ptid);
	    struct regcache *child_regcache;
	    CORE_ADDR parent_pc;

	    /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
	       indicating that the displaced stepping of syscall instruction
	       has been done.  Perform cleanup for parent process here.  Note
	       that this operation also cleans up the child process for vfork,
	       because their pages are shared.  */
	    displaced_step_fixup (ecs->ptid, GDB_SIGNAL_TRAP);

	    if (ecs->ws.kind == TARGET_WAITKIND_FORKED)
	      {
		/* Restore scratch pad for child process.  */
		displaced_step_restore (displaced, ecs->ws.value.related_pid);
	      }

	    /* Since the vfork/fork syscall instruction was executed in the scratchpad,
	       the child's PC is also within the scratchpad.  Set the child's PC
	       to the parent's PC value, which has already been fixed up.
	       FIXME: we use the parent's aspace here, although we're touching
	       the child, because the child hasn't been added to the inferior
	       list yet at this point.  */

	    child_regcache
	      = get_thread_arch_aspace_regcache (ecs->ws.value.related_pid,
						 gdbarch,
						 parent_inf->aspace);
	    /* Read PC value of parent process.  */
	    parent_pc = regcache_read_pc (regcache);

	    if (debug_displaced)
	      fprintf_unfiltered (gdb_stdlog,
				  "displaced: write child pc from %s to %s\n",
				  paddress (gdbarch,
					    regcache_read_pc (child_regcache)),
				  paddress (gdbarch, parent_pc));

	    regcache_write_pc (child_regcache, parent_pc);
	  }
      }

      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);

      /* Immediately detach breakpoints from the child before there's
	 any chance of letting the user delete breakpoints from the
	 breakpoint lists.  If we don't do this early, it's easy to
	 leave left over traps in the child, vis: "break foo; catch
	 fork; c; <fork>; del; c; <child calls foo>".  We only follow
	 the fork on the last `continue', and by that time the
	 breakpoint at "foo" is long gone from the breakpoint table.
	 If we vforked, then we don't need to unpatch here, since both
	 parent and child are sharing the same memory pages; we'll
	 need to unpatch at follow/detach time instead to be certain
	 that new breakpoints added between catchpoint hit time and
	 vfork follow are detached.  */
      if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
	{
	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ecs->ws.value.related_pid);
	}

      delete_just_stopped_threads_single_step_breakpoints ();

      /* In case the event is caught by a catchpoint, remember that
	 the event is to be followed at the next resume of the thread,
	 and not immediately.  */
      ecs->event_thread->pending_follow = ecs->ws;

      stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
			      stop_pc, ecs->ptid, &ecs->ws);

      /* If no catchpoint triggered for this, then keep going.  Note
	 that we're interested in knowing the bpstat actually causes a
	 stop, not just if it may explain the signal.  Software
	 watchpoints, for example, always appear in the bpstat.  */
      if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  ptid_t parent;
	  ptid_t child;
	  int should_resume;
	  int follow_child
	    = (follow_fork_mode_string == follow_fork_mode_child);

	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

	  should_resume = follow_fork ();

	  parent = ecs->ptid;
	  child = ecs->ws.value.related_pid;

	  /* In non-stop mode, also resume the other branch.  */
	  if (non_stop && !detach_fork)
	    {
	      /* Resume the branch we are NOT following first; the
		 branch we do follow is resumed (or stopped) below.  */
	      if (follow_child)
		switch_to_thread (parent);
	      else
		switch_to_thread (child);

	      ecs->event_thread = inferior_thread ();
	      ecs->ptid = inferior_ptid;
	      keep_going (ecs);
	    }

	  if (follow_child)
	    switch_to_thread (child);
	  else
	    switch_to_thread (parent);

	  ecs->event_thread = inferior_thread ();
	  ecs->ptid = inferior_ptid;

	  if (should_resume)
	    keep_going (ecs);
	  else
	    stop_waiting (ecs);
	  return;
	}
      process_event_stop_test (ecs);
      return;

    case TARGET_WAITKIND_VFORK_DONE:
      /* Done with the shared memory region.  Re-insert breakpoints in
	 the parent, and keep going.  */

      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: TARGET_WAITKIND_VFORK_DONE\n");

      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);

      current_inferior ()->waiting_for_vfork_done = 0;
      current_inferior ()->pspace->breakpoints_not_allowed = 0;
      /* This also takes care of reinserting breakpoints in the
	 previously locked inferior.  */
      keep_going (ecs);
      return;

    case TARGET_WAITKIND_EXECD:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");

      if (!ptid_equal (ecs->ptid, inferior_ptid))
	context_switch (ecs->ptid);

      stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));

      /* Do whatever is necessary to the parent branch of the vfork.  */
      handle_vfork_child_exec_or_exit (1);

      /* This causes the eventpoints and symbol table to be reset.
	 Must do this now, before trying to determine whether to
	 stop.  */
      follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
			      stop_pc, ecs->ptid, &ecs->ws);

      /* Note that this may be referenced from inside
	 bpstat_stop_status above, through inferior_has_execd.  */
      xfree (ecs->ws.value.execd_pathname);
      ecs->ws.value.execd_pathname = NULL;

      /* If no catchpoint triggered for this, then keep going.  */
      if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
	  keep_going (ecs);
	  return;
	}
      process_event_stop_test (ecs);
      return;

      /* Be careful not to try to gather much state about a thread
	 that's in a syscall.  It's frequently a losing proposition.  */
    case TARGET_WAITKIND_SYSCALL_ENTRY:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
      /* Getting the current syscall number.  A zero return from
	 handle_syscall_event means a catchpoint was hit.  */
      if (handle_syscall_event (ecs) == 0)
	process_event_stop_test (ecs);
      return;

      /* Before examining the threads further, step this thread to
	 get it entirely out of the syscall.  (We get notice of the
	 event when the thread is just on the verge of exiting a
	 syscall.  Stepping one instruction seems to get it back
	 into user code.)  */
    case TARGET_WAITKIND_SYSCALL_RETURN:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog,
			    "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
      if (handle_syscall_event (ecs) == 0)
	process_event_stop_test (ecs);
      return;

    case TARGET_WAITKIND_STOPPED:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
      ecs->event_thread->suspend.stop_signal = ecs->ws.value.sig;
      handle_signal_stop (ecs);
      return;

    case TARGET_WAITKIND_NO_HISTORY:
      if (debug_infrun)
	fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_NO_HISTORY\n");
      /* Reverse execution: target ran out of history info.  */

      delete_just_stopped_threads_single_step_breakpoints ();
      stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
      observer_notify_no_history ();
      stop_waiting (ecs);
      return;
    }
}
4222
4223 /* Come here when the program has stopped with a signal. */
4224
4225 static void
4226 handle_signal_stop (struct execution_control_state *ecs)
4227 {
4228 struct frame_info *frame;
4229 struct gdbarch *gdbarch;
4230 int stopped_by_watchpoint;
4231 enum stop_kind stop_soon;
4232 int random_signal;
4233
4234 gdb_assert (ecs->ws.kind == TARGET_WAITKIND_STOPPED);
4235
4236 /* Do we need to clean up the state of a thread that has
4237 completed a displaced single-step? (Doing so usually affects
4238 the PC, so do it here, before we set stop_pc.) */
4239 displaced_step_fixup (ecs->ptid,
4240 ecs->event_thread->suspend.stop_signal);
4241
4242 /* If we either finished a single-step or hit a breakpoint, but
4243 the user wanted this thread to be stopped, pretend we got a
4244 SIG0 (generic unsignaled stop). */
4245 if (ecs->event_thread->stop_requested
4246 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4247 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4248
4249 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
4250
4251 if (debug_infrun)
4252 {
4253 struct regcache *regcache = get_thread_regcache (ecs->ptid);
4254 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4255 struct cleanup *old_chain = save_inferior_ptid ();
4256
4257 inferior_ptid = ecs->ptid;
4258
4259 fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
4260 paddress (gdbarch, stop_pc));
4261 if (target_stopped_by_watchpoint ())
4262 {
4263 CORE_ADDR addr;
4264
4265 fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");
4266
4267 if (target_stopped_data_address (&current_target, &addr))
4268 fprintf_unfiltered (gdb_stdlog,
4269 "infrun: stopped data address = %s\n",
4270 paddress (gdbarch, addr));
4271 else
4272 fprintf_unfiltered (gdb_stdlog,
4273 "infrun: (no data address available)\n");
4274 }
4275
4276 do_cleanups (old_chain);
4277 }
4278
4279 /* This is originated from start_remote(), start_inferior() and
4280 shared libraries hook functions. */
4281 stop_soon = get_inferior_stop_soon (ecs->ptid);
4282 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
4283 {
4284 if (!ptid_equal (ecs->ptid, inferior_ptid))
4285 context_switch (ecs->ptid);
4286 if (debug_infrun)
4287 fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
4288 stop_print_frame = 1;
4289 stop_waiting (ecs);
4290 return;
4291 }
4292
4293 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4294 && stop_after_trap)
4295 {
4296 if (!ptid_equal (ecs->ptid, inferior_ptid))
4297 context_switch (ecs->ptid);
4298 if (debug_infrun)
4299 fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
4300 stop_print_frame = 0;
4301 stop_waiting (ecs);
4302 return;
4303 }
4304
4305 /* This originates from attach_command(). We need to overwrite
4306 the stop_signal here, because some kernels don't ignore a
4307 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
4308 See more comments in inferior.h. On the other hand, if we
4309 get a non-SIGSTOP, report it to the user - assume the backend
4310 will handle the SIGSTOP if it should show up later.
4311
4312 Also consider that the attach is complete when we see a
4313 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
4314 target extended-remote report it instead of a SIGSTOP
4315 (e.g. gdbserver). We already rely on SIGTRAP being our
4316 signal, so this is no exception.
4317
4318 Also consider that the attach is complete when we see a
4319 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
4320 the target to stop all threads of the inferior, in case the
4321 low level attach operation doesn't stop them implicitly. If
4322 they weren't stopped implicitly, then the stub will report a
4323 GDB_SIGNAL_0, meaning: stopped for no particular reason
4324 other than GDB's request. */
4325 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
4326 && (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_STOP
4327 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4328 || ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_0))
4329 {
4330 stop_print_frame = 1;
4331 stop_waiting (ecs);
4332 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4333 return;
4334 }
4335
4336 /* See if something interesting happened to the non-current thread. If
4337 so, then switch to that thread. */
4338 if (!ptid_equal (ecs->ptid, inferior_ptid))
4339 {
4340 if (debug_infrun)
4341 fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");
4342
4343 context_switch (ecs->ptid);
4344
4345 if (deprecated_context_hook)
4346 deprecated_context_hook (pid_to_thread_id (ecs->ptid));
4347 }
4348
4349 /* At this point, get hold of the now-current thread's frame. */
4350 frame = get_current_frame ();
4351 gdbarch = get_frame_arch (frame);
4352
4353 /* Pull the single step breakpoints out of the target. */
4354 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP)
4355 {
4356 struct regcache *regcache;
4357 struct address_space *aspace;
4358 CORE_ADDR pc;
4359
4360 regcache = get_thread_regcache (ecs->ptid);
4361 aspace = get_regcache_aspace (regcache);
4362 pc = regcache_read_pc (regcache);
4363
4364 /* However, before doing so, if this single-step breakpoint was
4365 actually for another thread, set this thread up for moving
4366 past it. */
4367 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
4368 aspace, pc))
4369 {
4370 if (single_step_breakpoint_inserted_here_p (aspace, pc))
4371 {
4372 if (debug_infrun)
4373 {
4374 fprintf_unfiltered (gdb_stdlog,
4375 "infrun: [%s] hit another thread's "
4376 "single-step breakpoint\n",
4377 target_pid_to_str (ecs->ptid));
4378 }
4379 ecs->hit_singlestep_breakpoint = 1;
4380 }
4381 }
4382 else
4383 {
4384 if (debug_infrun)
4385 {
4386 fprintf_unfiltered (gdb_stdlog,
4387 "infrun: [%s] hit its "
4388 "single-step breakpoint\n",
4389 target_pid_to_str (ecs->ptid));
4390 }
4391 }
4392 }
4393 delete_just_stopped_threads_single_step_breakpoints ();
4394
4395 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4396 && ecs->event_thread->control.trap_expected
4397 && ecs->event_thread->stepping_over_watchpoint)
4398 stopped_by_watchpoint = 0;
4399 else
4400 stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);
4401
4402 /* If necessary, step over this watchpoint. We'll be back to display
4403 it in a moment. */
4404 if (stopped_by_watchpoint
4405 && (target_have_steppable_watchpoint
4406 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
4407 {
4408 /* At this point, we are stopped at an instruction which has
4409 attempted to write to a piece of memory under control of
4410 a watchpoint. The instruction hasn't actually executed
4411 yet. If we were to evaluate the watchpoint expression
4412 now, we would get the old value, and therefore no change
4413 would seem to have occurred.
4414
4415 In order to make watchpoints work `right', we really need
4416 to complete the memory write, and then evaluate the
4417 watchpoint expression. We do this by single-stepping the
4418 target.
4419
4420 It may not be necessary to disable the watchpoint to step over
4421 it. For example, the PA can (with some kernel cooperation)
4422 single step over a watchpoint without disabling the watchpoint.
4423
4424 It is far more common to need to disable a watchpoint to step
4425 the inferior over it. If we have non-steppable watchpoints,
4426 we must disable the current watchpoint; it's simplest to
4427 disable all watchpoints.
4428
4429 Any breakpoint at PC must also be stepped over -- if there's
4430 one, it will have already triggered before the watchpoint
4431 triggered, and we either already reported it to the user, or
4432 it didn't cause a stop and we called keep_going. In either
4433 case, if there was a breakpoint at PC, we must be trying to
4434 step past it. */
4435 ecs->event_thread->stepping_over_watchpoint = 1;
4436 keep_going (ecs);
4437 return;
4438 }
4439
4440 ecs->event_thread->stepping_over_breakpoint = 0;
4441 ecs->event_thread->stepping_over_watchpoint = 0;
4442 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
4443 ecs->event_thread->control.stop_step = 0;
4444 stop_print_frame = 1;
4445 stopped_by_random_signal = 0;
4446
4447 /* Hide inlined functions starting here, unless we just performed stepi or
4448 nexti. After stepi and nexti, always show the innermost frame (not any
4449 inline function call sites). */
4450 if (ecs->event_thread->control.step_range_end != 1)
4451 {
4452 struct address_space *aspace =
4453 get_regcache_aspace (get_thread_regcache (ecs->ptid));
4454
4455 /* skip_inline_frames is expensive, so we avoid it if we can
4456 determine that the address is one where functions cannot have
4457 been inlined. This improves performance with inferiors that
4458 load a lot of shared libraries, because the solib event
4459 breakpoint is defined as the address of a function (i.e. not
4460 inline). Note that we have to check the previous PC as well
4461 as the current one to catch cases when we have just
4462 single-stepped off a breakpoint prior to reinstating it.
4463 Note that we're assuming that the code we single-step to is
4464 not inline, but that's not definitive: there's nothing
4465 preventing the event breakpoint function from containing
4466 inlined code, and the single-step ending up there. If the
4467 user had set a breakpoint on that inlined code, the missing
4468 skip_inline_frames call would break things. Fortunately
4469 that's an extremely unlikely scenario. */
4470 if (!pc_at_non_inline_function (aspace, stop_pc, &ecs->ws)
4471 && !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4472 && ecs->event_thread->control.trap_expected
4473 && pc_at_non_inline_function (aspace,
4474 ecs->event_thread->prev_pc,
4475 &ecs->ws)))
4476 {
4477 skip_inline_frames (ecs->ptid);
4478
4479 /* Re-fetch current thread's frame in case that invalidated
4480 the frame cache. */
4481 frame = get_current_frame ();
4482 gdbarch = get_frame_arch (frame);
4483 }
4484 }
4485
4486 if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4487 && ecs->event_thread->control.trap_expected
4488 && gdbarch_single_step_through_delay_p (gdbarch)
4489 && currently_stepping (ecs->event_thread))
4490 {
4491 /* We're trying to step off a breakpoint. Turns out that we're
4492 also on an instruction that needs to be stepped multiple
4493 times before it's been fully executing. E.g., architectures
4494 with a delay slot. It needs to be stepped twice, once for
4495 the instruction and once for the delay slot. */
4496 int step_through_delay
4497 = gdbarch_single_step_through_delay (gdbarch, frame);
4498
4499 if (debug_infrun && step_through_delay)
4500 fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
4501 if (ecs->event_thread->control.step_range_end == 0
4502 && step_through_delay)
4503 {
4504 /* The user issued a continue when stopped at a breakpoint.
4505 Set up for another trap and get out of here. */
4506 ecs->event_thread->stepping_over_breakpoint = 1;
4507 keep_going (ecs);
4508 return;
4509 }
4510 else if (step_through_delay)
4511 {
4512 /* The user issued a step when stopped at a breakpoint.
4513 Maybe we should stop, maybe we should not - the delay
4514 slot *might* correspond to a line of source. In any
4515 case, don't decide that here, just set
4516 ecs->stepping_over_breakpoint, making sure we
4517 single-step again before breakpoints are re-inserted. */
4518 ecs->event_thread->stepping_over_breakpoint = 1;
4519 }
4520 }
4521
4522 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
4523 handles this event. */
4524 ecs->event_thread->control.stop_bpstat
4525 = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
4526 stop_pc, ecs->ptid, &ecs->ws);
4527
4528 /* Following in case break condition called a
4529 function. */
4530 stop_print_frame = 1;
4531
4532 /* This is where we handle "moribund" watchpoints. Unlike
4533 software breakpoints traps, hardware watchpoint traps are
4534 always distinguishable from random traps. If no high-level
4535 watchpoint is associated with the reported stop data address
4536 anymore, then the bpstat does not explain the signal ---
4537 simply make sure to ignore it if `stopped_by_watchpoint' is
4538 set. */
4539
4540 if (debug_infrun
4541 && ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4542 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4543 GDB_SIGNAL_TRAP)
4544 && stopped_by_watchpoint)
4545 fprintf_unfiltered (gdb_stdlog,
4546 "infrun: no user watchpoint explains "
4547 "watchpoint SIGTRAP, ignoring\n");
4548
4549 /* NOTE: cagney/2003-03-29: These checks for a random signal
4550 at one stage in the past included checks for an inferior
4551 function call's call dummy's return breakpoint. The original
4552 comment, that went with the test, read:
4553
4554 ``End of a stack dummy. Some systems (e.g. Sony news) give
4555 another signal besides SIGTRAP, so check here as well as
4556 above.''
4557
4558 If someone ever tries to get call dummies on a
4559 non-executable stack to work (where the target would stop
4560 with something like a SIGSEGV), then those tests might need
4561 to be re-instated. Given, however, that the tests were only
4562 enabled when momentary breakpoints were not being used, I
4563 suspect that it won't be the case.
4564
4565 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
4566 be necessary for call dummies on a non-executable stack on
4567 SPARC. */
4568
4569 /* See if the breakpoints module can explain the signal. */
4570 random_signal
4571 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
4572 ecs->event_thread->suspend.stop_signal);
4573
4574 /* Maybe this was a trap for a software breakpoint that has since
4575 been removed. */
4576 if (random_signal && target_stopped_by_sw_breakpoint ())
4577 {
4578 if (program_breakpoint_here_p (gdbarch, stop_pc))
4579 {
4580 struct regcache *regcache;
4581 int decr_pc;
4582
4583 /* Re-adjust PC to what the program would see if GDB was not
4584 debugging it. */
4585 regcache = get_thread_regcache (ecs->event_thread->ptid);
4586 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4587 if (decr_pc != 0)
4588 {
4589 struct cleanup *old_cleanups = make_cleanup (null_cleanup, NULL);
4590
4591 if (record_full_is_used ())
4592 record_full_gdb_operation_disable_set ();
4593
4594 regcache_write_pc (regcache, stop_pc + decr_pc);
4595
4596 do_cleanups (old_cleanups);
4597 }
4598 }
4599 else
4600 {
4601 /* A delayed software breakpoint event. Ignore the trap. */
4602 if (debug_infrun)
4603 fprintf_unfiltered (gdb_stdlog,
4604 "infrun: delayed software breakpoint "
4605 "trap, ignoring\n");
4606 random_signal = 0;
4607 }
4608 }
4609
4610 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
4611 has since been removed. */
4612 if (random_signal && target_stopped_by_hw_breakpoint ())
4613 {
4614 /* A delayed hardware breakpoint event. Ignore the trap. */
4615 if (debug_infrun)
4616 fprintf_unfiltered (gdb_stdlog,
4617 "infrun: delayed hardware breakpoint/watchpoint "
4618 "trap, ignoring\n");
4619 random_signal = 0;
4620 }
4621
4622 /* If not, perhaps stepping/nexting can. */
4623 if (random_signal)
4624 random_signal = !(ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
4625 && currently_stepping (ecs->event_thread));
4626
4627 /* Perhaps the thread hit a single-step breakpoint of _another_
4628 thread. Single-step breakpoints are transparent to the
4629 breakpoints module. */
4630 if (random_signal)
4631 random_signal = !ecs->hit_singlestep_breakpoint;
4632
4633 /* No? Perhaps we got a moribund watchpoint. */
4634 if (random_signal)
4635 random_signal = !stopped_by_watchpoint;
4636
4637 /* For the program's own signals, act according to
4638 the signal handling tables. */
4639
4640 if (random_signal)
4641 {
4642 /* Signal not for debugging purposes. */
4643 struct inferior *inf = find_inferior_ptid (ecs->ptid);
4644 enum gdb_signal stop_signal = ecs->event_thread->suspend.stop_signal;
4645
4646 if (debug_infrun)
4647 fprintf_unfiltered (gdb_stdlog, "infrun: random signal (%s)\n",
4648 gdb_signal_to_symbol_string (stop_signal));
4649
4650 stopped_by_random_signal = 1;
4651
4652 /* Always stop on signals if we're either just gaining control
4653 of the program, or the user explicitly requested this thread
4654 to remain stopped. */
4655 if (stop_soon != NO_STOP_QUIETLY
4656 || ecs->event_thread->stop_requested
4657 || (!inf->detaching
4658 && signal_stop_state (ecs->event_thread->suspend.stop_signal)))
4659 {
4660 stop_waiting (ecs);
4661 return;
4662 }
4663
4664 /* Notify observers the signal has "handle print" set. Note we
4665 returned early above if stopping; normal_stop handles the
4666 printing in that case. */
4667 if (signal_print[ecs->event_thread->suspend.stop_signal])
4668 {
4669 /* The signal table tells us to print about this signal. */
4670 target_terminal_ours_for_output ();
4671 observer_notify_signal_received (ecs->event_thread->suspend.stop_signal);
4672 target_terminal_inferior ();
4673 }
4674
4675 /* Clear the signal if it should not be passed. */
4676 if (signal_program[ecs->event_thread->suspend.stop_signal] == 0)
4677 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
4678
4679 if (ecs->event_thread->prev_pc == stop_pc
4680 && ecs->event_thread->control.trap_expected
4681 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4682 {
4683 /* We were just starting a new sequence, attempting to
4684 single-step off of a breakpoint and expecting a SIGTRAP.
4685 Instead this signal arrives. This signal will take us out
4686 of the stepping range so GDB needs to remember to, when
4687 the signal handler returns, resume stepping off that
4688 breakpoint. */
4689 /* To simplify things, "continue" is forced to use the same
4690 code paths as single-step - set a breakpoint at the
4691 signal return address and then, once hit, step off that
4692 breakpoint. */
4693 if (debug_infrun)
4694 fprintf_unfiltered (gdb_stdlog,
4695 "infrun: signal arrived while stepping over "
4696 "breakpoint\n");
4697
4698 insert_hp_step_resume_breakpoint_at_frame (frame);
4699 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4700 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4701 ecs->event_thread->control.trap_expected = 0;
4702
4703 /* If we were nexting/stepping some other thread, switch to
4704 it, so that we don't continue it, losing control. */
4705 if (!switch_back_to_stepped_thread (ecs))
4706 keep_going (ecs);
4707 return;
4708 }
4709
4710 if (ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_0
4711 && (pc_in_thread_step_range (stop_pc, ecs->event_thread)
4712 || ecs->event_thread->control.step_range_end == 1)
4713 && frame_id_eq (get_stack_frame_id (frame),
4714 ecs->event_thread->control.step_stack_frame_id)
4715 && ecs->event_thread->control.step_resume_breakpoint == NULL)
4716 {
4717 /* The inferior is about to take a signal that will take it
4718 out of the single step range. Set a breakpoint at the
4719 current PC (which is presumably where the signal handler
4720 will eventually return) and then allow the inferior to
4721 run free.
4722
4723 Note that this is only needed for a signal delivered
4724 while in the single-step range. Nested signals aren't a
4725 problem as they eventually all return. */
4726 if (debug_infrun)
4727 fprintf_unfiltered (gdb_stdlog,
4728 "infrun: signal may take us out of "
4729 "single-step range\n");
4730
4731 insert_hp_step_resume_breakpoint_at_frame (frame);
4732 ecs->event_thread->step_after_step_resume_breakpoint = 1;
4733 /* Reset trap_expected to ensure breakpoints are re-inserted. */
4734 ecs->event_thread->control.trap_expected = 0;
4735 keep_going (ecs);
4736 return;
4737 }
4738
4739 /* Note: step_resume_breakpoint may be non-NULL. This occurs
4740 when either there's a nested signal, or when there's a
4741 pending signal enabled just as the signal handler returns
4742 (leaving the inferior at the step-resume-breakpoint without
4743 actually executing it). Either way continue until the
4744 breakpoint is really hit. */
4745
4746 if (!switch_back_to_stepped_thread (ecs))
4747 {
4748 if (debug_infrun)
4749 fprintf_unfiltered (gdb_stdlog,
4750 "infrun: random signal, keep going\n");
4751
4752 keep_going (ecs);
4753 }
4754 return;
4755 }
4756
4757 process_event_stop_test (ecs);
4758 }
4759
4760 /* Come here when we've got some debug event / signal we can explain
4761 (IOW, not a random signal), and test whether it should cause a
4762 stop, or whether we should resume the inferior (transparently).
4763 E.g., could be a breakpoint whose condition evaluates false; we
4764 could be still stepping within the line; etc. */
4765
4766 static void
4767 process_event_stop_test (struct execution_control_state *ecs)
4768 {
4769 struct symtab_and_line stop_pc_sal;
4770 struct frame_info *frame;
4771 struct gdbarch *gdbarch;
4772 CORE_ADDR jmp_buf_pc;
4773 struct bpstat_what what;
4774
4775 /* Handle cases caused by hitting a breakpoint. */
4776
4777 frame = get_current_frame ();
4778 gdbarch = get_frame_arch (frame);
4779
4780 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
4781
4782 if (what.call_dummy)
4783 {
4784 stop_stack_dummy = what.call_dummy;
4785 }
4786
4787 /* If we hit an internal event that triggers symbol changes, the
4788 current frame will be invalidated within bpstat_what (e.g., if we
4789 hit an internal solib event). Re-fetch it. */
4790 frame = get_current_frame ();
4791 gdbarch = get_frame_arch (frame);
4792
4793 switch (what.main_action)
4794 {
4795 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
4796 /* If we hit the breakpoint at longjmp while stepping, we
4797 install a momentary breakpoint at the target of the
4798 jmp_buf. */
4799
4800 if (debug_infrun)
4801 fprintf_unfiltered (gdb_stdlog,
4802 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");
4803
4804 ecs->event_thread->stepping_over_breakpoint = 1;
4805
4806 if (what.is_longjmp)
4807 {
4808 struct value *arg_value;
4809
4810 /* If we set the longjmp breakpoint via a SystemTap probe,
4811 then use it to extract the arguments. The destination PC
4812 is the third argument to the probe. */
4813 arg_value = probe_safe_evaluate_at_pc (frame, 2);
4814 if (arg_value)
4815 {
4816 jmp_buf_pc = value_as_address (arg_value);
4817 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
4818 }
4819 else if (!gdbarch_get_longjmp_target_p (gdbarch)
4820 || !gdbarch_get_longjmp_target (gdbarch,
4821 frame, &jmp_buf_pc))
4822 {
4823 if (debug_infrun)
4824 fprintf_unfiltered (gdb_stdlog,
4825 "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME "
4826 "(!gdbarch_get_longjmp_target)\n");
4827 keep_going (ecs);
4828 return;
4829 }
4830
4831 /* Insert a breakpoint at resume address. */
4832 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
4833 }
4834 else
4835 check_exception_resume (ecs, frame);
4836 keep_going (ecs);
4837 return;
4838
4839 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
4840 {
4841 struct frame_info *init_frame;
4842
4843 /* There are several cases to consider.
4844
4845 1. The initiating frame no longer exists. In this case we
4846 must stop, because the exception or longjmp has gone too
4847 far.
4848
4849 2. The initiating frame exists, and is the same as the
4850 current frame. We stop, because the exception or longjmp
4851 has been caught.
4852
4853 3. The initiating frame exists and is different from the
4854 current frame. This means the exception or longjmp has
4855 been caught beneath the initiating frame, so keep going.
4856
4857 4. longjmp breakpoint has been placed just to protect
4858 against stale dummy frames and user is not interested in
4859 stopping around longjmps. */
4860
4861 if (debug_infrun)
4862 fprintf_unfiltered (gdb_stdlog,
4863 "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");
4864
4865 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
4866 != NULL);
4867 delete_exception_resume_breakpoint (ecs->event_thread);
4868
4869 if (what.is_longjmp)
4870 {
4871 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
4872
4873 if (!frame_id_p (ecs->event_thread->initiating_frame))
4874 {
4875 /* Case 4. */
4876 keep_going (ecs);
4877 return;
4878 }
4879 }
4880
4881 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
4882
4883 if (init_frame)
4884 {
4885 struct frame_id current_id
4886 = get_frame_id (get_current_frame ());
4887 if (frame_id_eq (current_id,
4888 ecs->event_thread->initiating_frame))
4889 {
4890 /* Case 2. Fall through. */
4891 }
4892 else
4893 {
4894 /* Case 3. */
4895 keep_going (ecs);
4896 return;
4897 }
4898 }
4899
4900 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
4901 exists. */
4902 delete_step_resume_breakpoint (ecs->event_thread);
4903
4904 end_stepping_range (ecs);
4905 }
4906 return;
4907
4908 case BPSTAT_WHAT_SINGLE:
4909 if (debug_infrun)
4910 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
4911 ecs->event_thread->stepping_over_breakpoint = 1;
4912 /* Still need to check other stuff, at least the case where we
4913 are stepping and step out of the right range. */
4914 break;
4915
4916 case BPSTAT_WHAT_STEP_RESUME:
4917 if (debug_infrun)
4918 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");
4919
4920 delete_step_resume_breakpoint (ecs->event_thread);
4921 if (ecs->event_thread->control.proceed_to_finish
4922 && execution_direction == EXEC_REVERSE)
4923 {
4924 struct thread_info *tp = ecs->event_thread;
4925
4926 /* We are finishing a function in reverse, and just hit the
4927 step-resume breakpoint at the start address of the
4928 function, and we're almost there -- just need to back up
4929 by one more single-step, which should take us back to the
4930 function call. */
4931 tp->control.step_range_start = tp->control.step_range_end = 1;
4932 keep_going (ecs);
4933 return;
4934 }
4935 fill_in_stop_func (gdbarch, ecs);
4936 if (stop_pc == ecs->stop_func_start
4937 && execution_direction == EXEC_REVERSE)
4938 {
4939 /* We are stepping over a function call in reverse, and just
4940 hit the step-resume breakpoint at the start address of
4941 the function. Go back to single-stepping, which should
4942 take us back to the function call. */
4943 ecs->event_thread->stepping_over_breakpoint = 1;
4944 keep_going (ecs);
4945 return;
4946 }
4947 break;
4948
4949 case BPSTAT_WHAT_STOP_NOISY:
4950 if (debug_infrun)
4951 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
4952 stop_print_frame = 1;
4953
4954 /* Assume the thread stopped for a breakpoint. We'll still check
4955 whether a/the breakpoint is there when the thread is next
4956 resumed. */
4957 ecs->event_thread->stepping_over_breakpoint = 1;
4958
4959 stop_waiting (ecs);
4960 return;
4961
4962 case BPSTAT_WHAT_STOP_SILENT:
4963 if (debug_infrun)
4964 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
4965 stop_print_frame = 0;
4966
4967 /* Assume the thread stopped for a breakpoint. We'll still check
4968 whether a/the breakpoint is there when the thread is next
4969 resumed. */
4970 ecs->event_thread->stepping_over_breakpoint = 1;
4971 stop_waiting (ecs);
4972 return;
4973
4974 case BPSTAT_WHAT_HP_STEP_RESUME:
4975 if (debug_infrun)
4976 fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_HP_STEP_RESUME\n");
4977
4978 delete_step_resume_breakpoint (ecs->event_thread);
4979 if (ecs->event_thread->step_after_step_resume_breakpoint)
4980 {
4981 /* Back when the step-resume breakpoint was inserted, we
4982 were trying to single-step off a breakpoint. Go back to
4983 doing that. */
4984 ecs->event_thread->step_after_step_resume_breakpoint = 0;
4985 ecs->event_thread->stepping_over_breakpoint = 1;
4986 keep_going (ecs);
4987 return;
4988 }
4989 break;
4990
4991 case BPSTAT_WHAT_KEEP_CHECKING:
4992 break;
4993 }
4994
4995 /* If we stepped a permanent breakpoint and we had a high priority
4996 step-resume breakpoint for the address we stepped, but we didn't
4997 hit it, then we must have stepped into the signal handler. The
4998 step-resume was only necessary to catch the case of _not_
4999 stepping into the handler, so delete it, and fall through to
5000 checking whether the step finished. */
5001 if (ecs->event_thread->stepped_breakpoint)
5002 {
5003 struct breakpoint *sr_bp
5004 = ecs->event_thread->control.step_resume_breakpoint;
5005
5006 if (sr_bp != NULL
5007 && sr_bp->loc->permanent
5008 && sr_bp->type == bp_hp_step_resume
5009 && sr_bp->loc->address == ecs->event_thread->prev_pc)
5010 {
5011 if (debug_infrun)
5012 fprintf_unfiltered (gdb_stdlog,
5013 "infrun: stepped permanent breakpoint, stopped in "
5014 "handler\n");
5015 delete_step_resume_breakpoint (ecs->event_thread);
5016 ecs->event_thread->step_after_step_resume_breakpoint = 0;
5017 }
5018 }
5019
5020 /* We come here if we hit a breakpoint but should not stop for it.
5021 Possibly we also were stepping and should stop for that. So fall
5022 through and test for stepping. But, if not stepping, do not
5023 stop. */
5024
5025 /* In all-stop mode, if we're currently stepping but have stopped in
5026 some other thread, we need to switch back to the stepped thread. */
5027 if (switch_back_to_stepped_thread (ecs))
5028 return;
5029
5030 if (ecs->event_thread->control.step_resume_breakpoint)
5031 {
5032 if (debug_infrun)
5033 fprintf_unfiltered (gdb_stdlog,
5034 "infrun: step-resume breakpoint is inserted\n");
5035
5036 /* Having a step-resume breakpoint overrides anything
5037 else having to do with stepping commands until
5038 that breakpoint is reached. */
5039 keep_going (ecs);
5040 return;
5041 }
5042
5043 if (ecs->event_thread->control.step_range_end == 0)
5044 {
5045 if (debug_infrun)
5046 fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
5047 /* Likewise if we aren't even stepping. */
5048 keep_going (ecs);
5049 return;
5050 }
5051
5052 /* Re-fetch current thread's frame in case the code above caused
5053 the frame cache to be re-initialized, making our FRAME variable
5054 a dangling pointer. */
5055 frame = get_current_frame ();
5056 gdbarch = get_frame_arch (frame);
5057 fill_in_stop_func (gdbarch, ecs);
5058
5059 /* If stepping through a line, keep going if still within it.
5060
5061 Note that step_range_end is the address of the first instruction
5062 beyond the step range, and NOT the address of the last instruction
5063 within it!
5064
5065 Note also that during reverse execution, we may be stepping
5066 through a function epilogue and therefore must detect when
5067 the current-frame changes in the middle of a line. */
5068
5069 if (pc_in_thread_step_range (stop_pc, ecs->event_thread)
5070 && (execution_direction != EXEC_REVERSE
5071 || frame_id_eq (get_frame_id (frame),
5072 ecs->event_thread->control.step_frame_id)))
5073 {
5074 if (debug_infrun)
5075 fprintf_unfiltered
5076 (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
5077 paddress (gdbarch, ecs->event_thread->control.step_range_start),
5078 paddress (gdbarch, ecs->event_thread->control.step_range_end));
5079
5080 /* Tentatively re-enable range stepping; `resume' disables it if
5081 necessary (e.g., if we're stepping over a breakpoint or we
5082 have software watchpoints). */
5083 ecs->event_thread->control.may_range_step = 1;
5084
5085 /* When stepping backward, stop at beginning of line range
5086 (unless it's the function entry point, in which case
5087 keep going back to the call point). */
5088 if (stop_pc == ecs->event_thread->control.step_range_start
5089 && stop_pc != ecs->stop_func_start
5090 && execution_direction == EXEC_REVERSE)
5091 end_stepping_range (ecs);
5092 else
5093 keep_going (ecs);
5094
5095 return;
5096 }
5097
5098 /* We stepped out of the stepping range. */
5099
5100 /* If we are stepping at the source level and entered the runtime
5101 loader dynamic symbol resolution code...
5102
5103 EXEC_FORWARD: we keep on single stepping until we exit the run
5104 time loader code and reach the callee's address.
5105
5106 EXEC_REVERSE: we've already executed the callee (backward), and
5107 the runtime loader code is handled just like any other
5108 undebuggable function call. Now we need only keep stepping
5109 backward through the trampoline code, and that's handled further
5110 down, so there is nothing for us to do here. */
5111
5112 if (execution_direction != EXEC_REVERSE
5113 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5114 && in_solib_dynsym_resolve_code (stop_pc))
5115 {
5116 CORE_ADDR pc_after_resolver =
5117 gdbarch_skip_solib_resolver (gdbarch, stop_pc);
5118
5119 if (debug_infrun)
5120 fprintf_unfiltered (gdb_stdlog,
5121 "infrun: stepped into dynsym resolve code\n");
5122
5123 if (pc_after_resolver)
5124 {
5125 /* Set up a step-resume breakpoint at the address
5126 indicated by SKIP_SOLIB_RESOLVER. */
5127 struct symtab_and_line sr_sal;
5128
5129 init_sal (&sr_sal);
5130 sr_sal.pc = pc_after_resolver;
5131 sr_sal.pspace = get_frame_program_space (frame);
5132
5133 insert_step_resume_breakpoint_at_sal (gdbarch,
5134 sr_sal, null_frame_id);
5135 }
5136
5137 keep_going (ecs);
5138 return;
5139 }
5140
5141 if (ecs->event_thread->control.step_range_end != 1
5142 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5143 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5144 && get_frame_type (frame) == SIGTRAMP_FRAME)
5145 {
5146 if (debug_infrun)
5147 fprintf_unfiltered (gdb_stdlog,
5148 "infrun: stepped into signal trampoline\n");
5149 /* The inferior, while doing a "step" or "next", has ended up in
5150 a signal trampoline (either by a signal being delivered or by
5151 the signal handler returning). Just single-step until the
5152 inferior leaves the trampoline (either by calling the handler
5153 or returning). */
5154 keep_going (ecs);
5155 return;
5156 }
5157
5158 /* If we're in the return path from a shared library trampoline,
5159 we want to proceed through the trampoline when stepping. */
5160 /* macro/2012-04-25: This needs to come before the subroutine
5161 call check below as on some targets return trampolines look
5162 like subroutine calls (MIPS16 return thunks). */
5163 if (gdbarch_in_solib_return_trampoline (gdbarch,
5164 stop_pc, ecs->stop_func_name)
5165 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5166 {
5167 /* Determine where this trampoline returns. */
5168 CORE_ADDR real_stop_pc;
5169
5170 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5171
5172 if (debug_infrun)
5173 fprintf_unfiltered (gdb_stdlog,
5174 "infrun: stepped into solib return tramp\n");
5175
5176 /* Only proceed through if we know where it's going. */
5177 if (real_stop_pc)
5178 {
5179 /* And put the step-breakpoint there and go until there. */
5180 struct symtab_and_line sr_sal;
5181
5182 init_sal (&sr_sal); /* initialize to zeroes */
5183 sr_sal.pc = real_stop_pc;
5184 sr_sal.section = find_pc_overlay (sr_sal.pc);
5185 sr_sal.pspace = get_frame_program_space (frame);
5186
5187 /* Do not specify what the fp should be when we stop since
5188 on some machines the prologue is where the new fp value
5189 is established. */
5190 insert_step_resume_breakpoint_at_sal (gdbarch,
5191 sr_sal, null_frame_id);
5192
5193 /* Restart without fiddling with the step ranges or
5194 other state. */
5195 keep_going (ecs);
5196 return;
5197 }
5198 }
5199
5200 /* Check for subroutine calls. The check for the current frame
5201 equalling the step ID is not necessary - the check of the
5202 previous frame's ID is sufficient - but it is a common case and
5203 cheaper than checking the previous frame's ID.
5204
5205 NOTE: frame_id_eq will never report two invalid frame IDs as
5206 being equal, so to get into this block, both the current and
5207 previous frame must have valid frame IDs. */
5208 /* The outer_frame_id check is a heuristic to detect stepping
5209 through startup code. If we step over an instruction which
5210 sets the stack pointer from an invalid value to a valid value,
5211 we may detect that as a subroutine call from the mythical
5212 "outermost" function. This could be fixed by marking
5213 outermost frames as !stack_p,code_p,special_p. Then the
5214 initial outermost frame, before sp was valid, would
5215 have code_addr == &_start. See the comment in frame_id_eq
5216 for more. */
5217 if (!frame_id_eq (get_stack_frame_id (frame),
5218 ecs->event_thread->control.step_stack_frame_id)
5219 && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
5220 ecs->event_thread->control.step_stack_frame_id)
5221 && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
5222 outer_frame_id)
5223 || (ecs->event_thread->control.step_start_function
5224 != find_pc_function (stop_pc)))))
5225 {
5226 CORE_ADDR real_stop_pc;
5227
5228 if (debug_infrun)
5229 fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
5230
5231 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
5232 {
5233 /* I presume that step_over_calls is only 0 when we're
5234 supposed to be stepping at the assembly language level
5235 ("stepi"). Just stop. */
5236 /* And this works the same backward as frontward. MVS */
5237 end_stepping_range (ecs);
5238 return;
5239 }
5240
5241 /* Reverse stepping through solib trampolines. */
5242
5243 if (execution_direction == EXEC_REVERSE
5244 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
5245 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5246 || (ecs->stop_func_start == 0
5247 && in_solib_dynsym_resolve_code (stop_pc))))
5248 {
5249 /* Any solib trampoline code can be handled in reverse
5250 by simply continuing to single-step. We have already
5251 executed the solib function (backwards), and a few
5252 steps will take us back through the trampoline to the
5253 caller. */
5254 keep_going (ecs);
5255 return;
5256 }
5257
5258 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5259 {
5260 /* We're doing a "next".
5261
5262 Normal (forward) execution: set a breakpoint at the
5263 callee's return address (the address at which the caller
5264 will resume).
5265
5266 Reverse (backward) execution. set the step-resume
5267 breakpoint at the start of the function that we just
5268 stepped into (backwards), and continue to there. When we
5269 get there, we'll need to single-step back to the caller. */
5270
5271 if (execution_direction == EXEC_REVERSE)
5272 {
5273 /* If we're already at the start of the function, we've either
5274 just stepped backward into a single instruction function,
5275 or stepped back out of a signal handler to the first instruction
5276 of the function. Just keep going, which will single-step back
5277 to the caller. */
5278 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
5279 {
5280 struct symtab_and_line sr_sal;
5281
5282 /* Normal function call return (static or dynamic). */
5283 init_sal (&sr_sal);
5284 sr_sal.pc = ecs->stop_func_start;
5285 sr_sal.pspace = get_frame_program_space (frame);
5286 insert_step_resume_breakpoint_at_sal (gdbarch,
5287 sr_sal, null_frame_id);
5288 }
5289 }
5290 else
5291 insert_step_resume_breakpoint_at_caller (frame);
5292
5293 keep_going (ecs);
5294 return;
5295 }
5296
5297 /* If we are in a function call trampoline (a stub between the
5298 calling routine and the real function), locate the real
5299 function. That's what tells us (a) whether we want to step
5300 into it at all, and (b) what prologue we want to run to the
5301 end of, if we do step into it. */
5302 real_stop_pc = skip_language_trampoline (frame, stop_pc);
5303 if (real_stop_pc == 0)
5304 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
5305 if (real_stop_pc != 0)
5306 ecs->stop_func_start = real_stop_pc;
5307
5308 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
5309 {
5310 struct symtab_and_line sr_sal;
5311
5312 init_sal (&sr_sal);
5313 sr_sal.pc = ecs->stop_func_start;
5314 sr_sal.pspace = get_frame_program_space (frame);
5315
5316 insert_step_resume_breakpoint_at_sal (gdbarch,
5317 sr_sal, null_frame_id);
5318 keep_going (ecs);
5319 return;
5320 }
5321
5322 /* If we have line number information for the function we are
5323 thinking of stepping into and the function isn't on the skip
5324 list, step into it.
5325
5326 If there are several symtabs at that PC (e.g. with include
5327 files), just want to know whether *any* of them have line
5328 numbers. find_pc_line handles this. */
5329 {
5330 struct symtab_and_line tmp_sal;
5331
5332 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
5333 if (tmp_sal.line != 0
5334 && !function_name_is_marked_for_skip (ecs->stop_func_name,
5335 &tmp_sal))
5336 {
5337 if (execution_direction == EXEC_REVERSE)
5338 handle_step_into_function_backward (gdbarch, ecs);
5339 else
5340 handle_step_into_function (gdbarch, ecs);
5341 return;
5342 }
5343 }
5344
5345 /* If we have no line number and the step-stop-if-no-debug is
5346 set, we stop the step so that the user has a chance to switch
5347 in assembly mode. */
5348 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5349 && step_stop_if_no_debug)
5350 {
5351 end_stepping_range (ecs);
5352 return;
5353 }
5354
5355 if (execution_direction == EXEC_REVERSE)
5356 {
5357 /* If we're already at the start of the function, we've either just
5358 stepped backward into a single instruction function without line
5359 number info, or stepped back out of a signal handler to the first
5360 instruction of the function without line number info. Just keep
5361 going, which will single-step back to the caller. */
5362 if (ecs->stop_func_start != stop_pc)
5363 {
5364 /* Set a breakpoint at callee's start address.
5365 From there we can step once and be back in the caller. */
5366 struct symtab_and_line sr_sal;
5367
5368 init_sal (&sr_sal);
5369 sr_sal.pc = ecs->stop_func_start;
5370 sr_sal.pspace = get_frame_program_space (frame);
5371 insert_step_resume_breakpoint_at_sal (gdbarch,
5372 sr_sal, null_frame_id);
5373 }
5374 }
5375 else
5376 /* Set a breakpoint at callee's return address (the address
5377 at which the caller will resume). */
5378 insert_step_resume_breakpoint_at_caller (frame);
5379
5380 keep_going (ecs);
5381 return;
5382 }
5383
5384 /* Reverse stepping through solib trampolines. */
5385
5386 if (execution_direction == EXEC_REVERSE
5387 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
5388 {
5389 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
5390 || (ecs->stop_func_start == 0
5391 && in_solib_dynsym_resolve_code (stop_pc)))
5392 {
5393 /* Any solib trampoline code can be handled in reverse
5394 by simply continuing to single-step. We have already
5395 executed the solib function (backwards), and a few
5396 steps will take us back through the trampoline to the
5397 caller. */
5398 keep_going (ecs);
5399 return;
5400 }
5401 else if (in_solib_dynsym_resolve_code (stop_pc))
5402 {
5403 /* Stepped backward into the solib dynsym resolver.
5404 Set a breakpoint at its start and continue, then
5405 one more step will take us out. */
5406 struct symtab_and_line sr_sal;
5407
5408 init_sal (&sr_sal);
5409 sr_sal.pc = ecs->stop_func_start;
5410 sr_sal.pspace = get_frame_program_space (frame);
5411 insert_step_resume_breakpoint_at_sal (gdbarch,
5412 sr_sal, null_frame_id);
5413 keep_going (ecs);
5414 return;
5415 }
5416 }
5417
5418 stop_pc_sal = find_pc_line (stop_pc, 0);
5419
5420 /* NOTE: tausq/2004-05-24: This if block used to be done before all
5421 the trampoline processing logic, however, there are some trampolines
5422 that have no names, so we should do trampoline handling first. */
5423 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
5424 && ecs->stop_func_name == NULL
5425 && stop_pc_sal.line == 0)
5426 {
5427 if (debug_infrun)
5428 fprintf_unfiltered (gdb_stdlog,
5429 "infrun: stepped into undebuggable function\n");
5430
5431 /* The inferior just stepped into, or returned to, an
5432 undebuggable function (where there is no debugging information
5433 and no line number corresponding to the address where the
5434 inferior stopped). Since we want to skip this kind of code,
5435 we keep going until the inferior returns from this
5436 function - unless the user has asked us not to (via
5437 set step-mode) or we no longer know how to get back
5438 to the call site. */
5439 if (step_stop_if_no_debug
5440 || !frame_id_p (frame_unwind_caller_id (frame)))
5441 {
5442 /* If we have no line number and the step-stop-if-no-debug
5443 is set, we stop the step so that the user has a chance to
5444 switch in assembly mode. */
5445 end_stepping_range (ecs);
5446 return;
5447 }
5448 else
5449 {
5450 /* Set a breakpoint at callee's return address (the address
5451 at which the caller will resume). */
5452 insert_step_resume_breakpoint_at_caller (frame);
5453 keep_going (ecs);
5454 return;
5455 }
5456 }
5457
5458 if (ecs->event_thread->control.step_range_end == 1)
5459 {
5460 /* It is stepi or nexti. We always want to stop stepping after
5461 one instruction. */
5462 if (debug_infrun)
5463 fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
5464 end_stepping_range (ecs);
5465 return;
5466 }
5467
5468 if (stop_pc_sal.line == 0)
5469 {
5470 /* We have no line number information. That means to stop
5471 stepping (does this always happen right after one instruction,
5472 when we do "s" in a function with no line numbers,
5473 or can this happen as a result of a return or longjmp?). */
5474 if (debug_infrun)
5475 fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
5476 end_stepping_range (ecs);
5477 return;
5478 }
5479
5480 /* Look for "calls" to inlined functions, part one. If the inline
5481 frame machinery detected some skipped call sites, we have entered
5482 a new inline function. */
5483
5484 if (frame_id_eq (get_frame_id (get_current_frame ()),
5485 ecs->event_thread->control.step_frame_id)
5486 && inline_skipped_frames (ecs->ptid))
5487 {
5488 struct symtab_and_line call_sal;
5489
5490 if (debug_infrun)
5491 fprintf_unfiltered (gdb_stdlog,
5492 "infrun: stepped into inlined function\n");
5493
5494 find_frame_sal (get_current_frame (), &call_sal);
5495
5496 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
5497 {
5498 /* For "step", we're going to stop. But if the call site
5499 for this inlined function is on the same source line as
5500 we were previously stepping, go down into the function
5501 first. Otherwise stop at the call site. */
5502
5503 if (call_sal.line == ecs->event_thread->current_line
5504 && call_sal.symtab == ecs->event_thread->current_symtab)
5505 step_into_inline_frame (ecs->ptid);
5506
5507 end_stepping_range (ecs);
5508 return;
5509 }
5510 else
5511 {
5512 /* For "next", we should stop at the call site if it is on a
5513 different source line. Otherwise continue through the
5514 inlined function. */
5515 if (call_sal.line == ecs->event_thread->current_line
5516 && call_sal.symtab == ecs->event_thread->current_symtab)
5517 keep_going (ecs);
5518 else
5519 end_stepping_range (ecs);
5520 return;
5521 }
5522 }
5523
5524 /* Look for "calls" to inlined functions, part two. If we are still
5525 in the same real function we were stepping through, but we have
5526 to go further up to find the exact frame ID, we are stepping
5527 through a more inlined call beyond its call site. */
5528
5529 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
5530 && !frame_id_eq (get_frame_id (get_current_frame ()),
5531 ecs->event_thread->control.step_frame_id)
5532 && stepped_in_from (get_current_frame (),
5533 ecs->event_thread->control.step_frame_id))
5534 {
5535 if (debug_infrun)
5536 fprintf_unfiltered (gdb_stdlog,
5537 "infrun: stepping through inlined function\n");
5538
5539 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
5540 keep_going (ecs);
5541 else
5542 end_stepping_range (ecs);
5543 return;
5544 }
5545
5546 if ((stop_pc == stop_pc_sal.pc)
5547 && (ecs->event_thread->current_line != stop_pc_sal.line
5548 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
5549 {
5550 /* We are at the start of a different line. So stop. Note that
5551 we don't stop if we step into the middle of a different line.
5552 That is said to make things like for (;;) statements work
5553 better. */
5554 if (debug_infrun)
5555 fprintf_unfiltered (gdb_stdlog,
5556 "infrun: stepped to a different line\n");
5557 end_stepping_range (ecs);
5558 return;
5559 }
5560
5561 /* We aren't done stepping.
5562
5563 Optimize by setting the stepping range to the line.
5564 (We might not be in the original line, but if we entered a
5565 new line in mid-statement, we continue stepping. This makes
5566 things like for(;;) statements work better.) */
5567
5568 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
5569 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
5570 ecs->event_thread->control.may_range_step = 1;
5571 set_step_info (frame, stop_pc_sal);
5572
5573 if (debug_infrun)
5574 fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
5575 keep_going (ecs);
5576 }
5577
5578 /* In all-stop mode, if we're currently stepping but have stopped in
5579 some other thread, we may need to switch back to the stepped
   thread.  Returns true if we set the inferior running, false if we left
5581 it stopped (and the event needs further processing). */
5582
5583 static int
5584 switch_back_to_stepped_thread (struct execution_control_state *ecs)
5585 {
5586 if (!non_stop)
5587 {
5588 struct thread_info *tp;
5589 struct thread_info *stepping_thread;
5590 struct thread_info *step_over;
5591
5592 /* If any thread is blocked on some internal breakpoint, and we
5593 simply need to step over that breakpoint to get it going
5594 again, do that first. */
5595
5596 /* However, if we see an event for the stepping thread, then we
5597 know all other threads have been moved past their breakpoints
5598 already. Let the caller check whether the step is finished,
5599 etc., before deciding to move it past a breakpoint. */
5600 if (ecs->event_thread->control.step_range_end != 0)
5601 return 0;
5602
5603 /* Check if the current thread is blocked on an incomplete
5604 step-over, interrupted by a random signal. */
5605 if (ecs->event_thread->control.trap_expected
5606 && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
5607 {
5608 if (debug_infrun)
5609 {
5610 fprintf_unfiltered (gdb_stdlog,
5611 "infrun: need to finish step-over of [%s]\n",
5612 target_pid_to_str (ecs->event_thread->ptid));
5613 }
5614 keep_going (ecs);
5615 return 1;
5616 }
5617
5618 /* Check if the current thread is blocked by a single-step
5619 breakpoint of another thread. */
5620 if (ecs->hit_singlestep_breakpoint)
5621 {
5622 if (debug_infrun)
5623 {
5624 fprintf_unfiltered (gdb_stdlog,
5625 "infrun: need to step [%s] over single-step "
5626 "breakpoint\n",
5627 target_pid_to_str (ecs->ptid));
5628 }
5629 keep_going (ecs);
5630 return 1;
5631 }
5632
5633 /* Otherwise, we no longer expect a trap in the current thread.
5634 Clear the trap_expected flag before switching back -- this is
5635 what keep_going does as well, if we call it. */
5636 ecs->event_thread->control.trap_expected = 0;
5637
5638 /* Likewise, clear the signal if it should not be passed. */
5639 if (!signal_program[ecs->event_thread->suspend.stop_signal])
5640 ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;
5641
5642 /* If scheduler locking applies even if not stepping, there's no
5643 need to walk over threads. Above we've checked whether the
5644 current thread is stepping. If some other thread not the
5645 event thread is stepping, then it must be that scheduler
5646 locking is not in effect. */
5647 if (schedlock_applies (ecs->event_thread))
5648 return 0;
5649
5650 /* Look for the stepping/nexting thread, and check if any other
5651 thread other than the stepping thread needs to start a
5652 step-over. Do all step-overs before actually proceeding with
5653 step/next/etc. */
5654 stepping_thread = NULL;
5655 step_over = NULL;
5656 ALL_NON_EXITED_THREADS (tp)
5657 {
5658 /* Ignore threads of processes we're not resuming. */
5659 if (!sched_multi
5660 && ptid_get_pid (tp->ptid) != ptid_get_pid (inferior_ptid))
5661 continue;
5662
5663 /* When stepping over a breakpoint, we lock all threads
5664 except the one that needs to move past the breakpoint.
5665 If a non-event thread has this set, the "incomplete
5666 step-over" check above should have caught it earlier. */
5667 gdb_assert (!tp->control.trap_expected);
5668
5669 /* Did we find the stepping thread? */
5670 if (tp->control.step_range_end)
5671 {
5672 /* Yep. There should only one though. */
5673 gdb_assert (stepping_thread == NULL);
5674
5675 /* The event thread is handled at the top, before we
5676 enter this loop. */
5677 gdb_assert (tp != ecs->event_thread);
5678
5679 /* If some thread other than the event thread is
5680 stepping, then scheduler locking can't be in effect,
5681 otherwise we wouldn't have resumed the current event
5682 thread in the first place. */
5683 gdb_assert (!schedlock_applies (tp));
5684
5685 stepping_thread = tp;
5686 }
5687 else if (thread_still_needs_step_over (tp))
5688 {
5689 step_over = tp;
5690
5691 /* At the top we've returned early if the event thread
5692 is stepping. If some other thread not the event
5693 thread is stepping, then scheduler locking can't be
5694 in effect, and we can resume this thread. No need to
5695 keep looking for the stepping thread then. */
5696 break;
5697 }
5698 }
5699
5700 if (step_over != NULL)
5701 {
5702 tp = step_over;
5703 if (debug_infrun)
5704 {
5705 fprintf_unfiltered (gdb_stdlog,
5706 "infrun: need to step-over [%s]\n",
5707 target_pid_to_str (tp->ptid));
5708 }
5709
5710 /* Only the stepping thread should have this set. */
5711 gdb_assert (tp->control.step_range_end == 0);
5712
5713 ecs->ptid = tp->ptid;
5714 ecs->event_thread = tp;
5715 switch_to_thread (ecs->ptid);
5716 keep_going (ecs);
5717 return 1;
5718 }
5719
5720 if (stepping_thread != NULL)
5721 {
5722 struct frame_info *frame;
5723 struct gdbarch *gdbarch;
5724
5725 tp = stepping_thread;
5726
5727 /* If the stepping thread exited, then don't try to switch
5728 back and resume it, which could fail in several different
5729 ways depending on the target. Instead, just keep going.
5730
5731 We can find a stepping dead thread in the thread list in
5732 two cases:
5733
5734 - The target supports thread exit events, and when the
5735 target tries to delete the thread from the thread list,
5736 inferior_ptid pointed at the exiting thread. In such
5737 case, calling delete_thread does not really remove the
5738 thread from the list; instead, the thread is left listed,
5739 with 'exited' state.
5740
5741 - The target's debug interface does not support thread
5742 exit events, and so we have no idea whatsoever if the
5743 previously stepping thread is still alive. For that
5744 reason, we need to synchronously query the target
5745 now. */
5746 if (is_exited (tp->ptid)
5747 || !target_thread_alive (tp->ptid))
5748 {
5749 if (debug_infrun)
5750 fprintf_unfiltered (gdb_stdlog,
5751 "infrun: not switching back to "
5752 "stepped thread, it has vanished\n");
5753
5754 delete_thread (tp->ptid);
5755 keep_going (ecs);
5756 return 1;
5757 }
5758
5759 if (debug_infrun)
5760 fprintf_unfiltered (gdb_stdlog,
5761 "infrun: switching back to stepped thread\n");
5762
5763 ecs->event_thread = tp;
5764 ecs->ptid = tp->ptid;
5765 context_switch (ecs->ptid);
5766
5767 stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
5768 frame = get_current_frame ();
5769 gdbarch = get_frame_arch (frame);
5770
5771 /* If the PC of the thread we were trying to single-step has
5772 changed, then that thread has trapped or been signaled,
5773 but the event has not been reported to GDB yet. Re-poll
5774 the target looking for this particular thread's event
5775 (i.e. temporarily enable schedlock) by:
5776
5777 - setting a break at the current PC
5778 - resuming that particular thread, only (by setting
5779 trap expected)
5780
5781 This prevents us continuously moving the single-step
5782 breakpoint forward, one instruction at a time,
5783 overstepping. */
5784
5785 if (stop_pc != tp->prev_pc)
5786 {
5787 ptid_t resume_ptid;
5788
5789 if (debug_infrun)
5790 fprintf_unfiltered (gdb_stdlog,
5791 "infrun: expected thread advanced also\n");
5792
5793 /* Clear the info of the previous step-over, as it's no
5794 longer valid. It's what keep_going would do too, if
5795 we called it. Must do this before trying to insert
5796 the sss breakpoint, otherwise if we were previously
5797 trying to step over this exact address in another
5798 thread, the breakpoint ends up not installed. */
5799 clear_step_over_info ();
5800
5801 insert_single_step_breakpoint (get_frame_arch (frame),
5802 get_frame_address_space (frame),
5803 stop_pc);
5804
5805 resume_ptid = user_visible_resume_ptid (tp->control.stepping_command);
5806 do_target_resume (resume_ptid,
5807 currently_stepping (tp), GDB_SIGNAL_0);
5808 prepare_to_wait (ecs);
5809 }
5810 else
5811 {
5812 if (debug_infrun)
5813 fprintf_unfiltered (gdb_stdlog,
5814 "infrun: expected thread still "
5815 "hasn't advanced\n");
5816 keep_going (ecs);
5817 }
5818
5819 return 1;
5820 }
5821 }
5822 return 0;
5823 }
5824
5825 /* Is thread TP in the middle of single-stepping? */
5826
5827 static int
5828 currently_stepping (struct thread_info *tp)
5829 {
5830 return ((tp->control.step_range_end
5831 && tp->control.step_resume_breakpoint == NULL)
5832 || tp->control.trap_expected
5833 || tp->stepped_breakpoint
5834 || bpstat_should_step ());
5835 }
5836
5837 /* Inferior has stepped into a subroutine call with source code that
5838 we should not step over. Do step to the first line of code in
5839 it. */
5840
static void
handle_step_into_function (struct gdbarch *gdbarch,
			   struct execution_control_state *ecs)
{
  struct compunit_symtab *cust;
  struct symtab_and_line stop_func_sal, sr_sal;

  /* Populate ecs->stop_func_start / ecs->stop_func_end for the
     function we just stepped into.  */
  fill_in_stop_func (gdbarch, ecs);

  /* Skip the function prologue, but only when the compilation unit
     is not assembly (prologue analysis is not meaningful there).  */
  cust = find_pc_compunit_symtab (stop_pc);
  if (cust != NULL && compunit_language (cust) != language_asm)
    ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
						  ecs->stop_func_start);

  stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
  /* Use the step_resume_break to step until the end of the prologue,
     even if that involves jumps (as it seems to on the vax under
     4.2).  */
  /* If the prologue ends in the middle of a source line, continue to
     the end of that source line (if it is still within the function).
     Otherwise, just go to end of prologue.  */
  if (stop_func_sal.end
      && stop_func_sal.pc != ecs->stop_func_start
      && stop_func_sal.end < ecs->stop_func_end)
    ecs->stop_func_start = stop_func_sal.end;

  /* Architectures which require breakpoint adjustment might not be able
     to place a breakpoint at the computed address.  If so, the test
     ``ecs->stop_func_start == stop_pc'' will never succeed.  Adjust
     ecs->stop_func_start to an address at which a breakpoint may be
     legitimately placed.

     Note:  kevinb/2004-01-19:  On FR-V, if this adjustment is not
     made, GDB will enter an infinite loop when stepping through
     optimized code consisting of VLIW instructions which contain
     subinstructions corresponding to different source lines.  On
     FR-V, it's not permitted to place a breakpoint on any but the
     first subinstruction of a VLIW instruction.  When a breakpoint is
     set, GDB will adjust the breakpoint address to the beginning of
     the VLIW instruction.  Thus, we need to make the corresponding
     adjustment here when computing the stop address.  */

  if (gdbarch_adjust_breakpoint_address_p (gdbarch))
    {
      ecs->stop_func_start
	= gdbarch_adjust_breakpoint_address (gdbarch,
					     ecs->stop_func_start);
    }

  if (ecs->stop_func_start == stop_pc)
    {
      /* We are already there: stop now.  */
      end_stepping_range (ecs);
      return;
    }
  else
    {
      /* Put the step-breakpoint there and go until there.  */
      init_sal (&sr_sal);	/* initialize to zeroes */
      sr_sal.pc = ecs->stop_func_start;
      sr_sal.section = find_pc_overlay (ecs->stop_func_start);
      sr_sal.pspace = get_frame_program_space (get_current_frame ());

      /* Do not specify what the fp should be when we stop since on
         some machines the prologue is where the new fp value is
         established.  */
      insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

      /* And make sure stepping stops right away then, by collapsing
	 the step range to a single point.  */
      ecs->event_thread->control.step_range_end
	= ecs->event_thread->control.step_range_start;
    }
  keep_going (ecs);
}
5915
5916 /* Inferior has stepped backward into a subroutine call with source
5917 code that we should not step over. Do step to the beginning of the
5918 last line of code in it. */
5919
5920 static void
5921 handle_step_into_function_backward (struct gdbarch *gdbarch,
5922 struct execution_control_state *ecs)
5923 {
5924 struct compunit_symtab *cust;
5925 struct symtab_and_line stop_func_sal;
5926
5927 fill_in_stop_func (gdbarch, ecs);
5928
5929 cust = find_pc_compunit_symtab (stop_pc);
5930 if (cust != NULL && compunit_language (cust) != language_asm)
5931 ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
5932 ecs->stop_func_start);
5933
5934 stop_func_sal = find_pc_line (stop_pc, 0);
5935
5936 /* OK, we're just going to keep stepping here. */
5937 if (stop_func_sal.pc == stop_pc)
5938 {
5939 /* We're there already. Just stop stepping now. */
5940 end_stepping_range (ecs);
5941 }
5942 else
5943 {
5944 /* Else just reset the step range and keep going.
5945 No step-resume breakpoint, they don't work for
5946 epilogues, which can have multiple entry paths. */
5947 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
5948 ecs->event_thread->control.step_range_end = stop_func_sal.end;
5949 keep_going (ecs);
5950 }
5951 return;
5952 }
5953
/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
   This is used both to skip over functions and to skip over stretches
   of code (e.g., signal handlers or undebuggable regions).  */
5956
5957 static void
5958 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
5959 struct symtab_and_line sr_sal,
5960 struct frame_id sr_id,
5961 enum bptype sr_type)
5962 {
5963 /* There should never be more than one step-resume or longjmp-resume
5964 breakpoint per thread, so we should never be setting a new
5965 step_resume_breakpoint when one is already active. */
5966 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
5967 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
5968
5969 if (debug_infrun)
5970 fprintf_unfiltered (gdb_stdlog,
5971 "infrun: inserting step-resume breakpoint at %s\n",
5972 paddress (gdbarch, sr_sal.pc));
5973
5974 inferior_thread ()->control.step_resume_breakpoint
5975 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type);
5976 }
5977
5978 void
5979 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
5980 struct symtab_and_line sr_sal,
5981 struct frame_id sr_id)
5982 {
5983 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
5984 sr_sal, sr_id,
5985 bp_step_resume);
5986 }
5987
5988 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
5989 This is used to skip a potential signal handler.
5990
5991 This is called with the interrupted function's frame. The signal
5992 handler, when it returns, will resume the interrupted function at
5993 RETURN_FRAME.pc. */
5994
5995 static void
5996 insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
5997 {
5998 struct symtab_and_line sr_sal;
5999 struct gdbarch *gdbarch;
6000
6001 gdb_assert (return_frame != NULL);
6002 init_sal (&sr_sal); /* initialize to zeros */
6003
6004 gdbarch = get_frame_arch (return_frame);
6005 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
6006 sr_sal.section = find_pc_overlay (sr_sal.pc);
6007 sr_sal.pspace = get_frame_program_space (return_frame);
6008
6009 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
6010 get_stack_frame_id (return_frame),
6011 bp_hp_step_resume);
6012 }
6013
6014 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
6015 is used to skip a function after stepping into it (for "next" or if
6016 the called function has no debugging information).
6017
6018 The current function has almost always been reached by single
6019 stepping a call or return instruction. NEXT_FRAME belongs to the
6020 current function, and the breakpoint will be set at the caller's
6021 resume address.
6022
6023 This is a separate function rather than reusing
6024 insert_hp_step_resume_breakpoint_at_frame in order to avoid
6025 get_prev_frame, which may stop prematurely (see the implementation
6026 of frame_unwind_caller_id for an example). */
6027
6028 static void
6029 insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
6030 {
6031 struct symtab_and_line sr_sal;
6032 struct gdbarch *gdbarch;
6033
6034 /* We shouldn't have gotten here if we don't know where the call site
6035 is. */
6036 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
6037
6038 init_sal (&sr_sal); /* initialize to zeros */
6039
6040 gdbarch = frame_unwind_caller_arch (next_frame);
6041 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
6042 frame_unwind_caller_pc (next_frame));
6043 sr_sal.section = find_pc_overlay (sr_sal.pc);
6044 sr_sal.pspace = frame_unwind_program_space (next_frame);
6045
6046 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
6047 frame_unwind_caller_id (next_frame));
6048 }
6049
6050 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
6051 new breakpoint at the target of a jmp_buf. The handling of
6052 longjmp-resume uses the same mechanisms used for handling
6053 "step-resume" breakpoints. */
6054
6055 static void
6056 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
6057 {
6058 /* There should never be more than one longjmp-resume breakpoint per
6059 thread, so we should never be setting a new
6060 longjmp_resume_breakpoint when one is already active. */
6061 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);
6062
6063 if (debug_infrun)
6064 fprintf_unfiltered (gdb_stdlog,
6065 "infrun: inserting longjmp-resume breakpoint at %s\n",
6066 paddress (gdbarch, pc));
6067
6068 inferior_thread ()->control.exception_resume_breakpoint =
6069 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
6070 }
6071
6072 /* Insert an exception resume breakpoint. TP is the thread throwing
6073 the exception. The block B is the block of the unwinder debug hook
6074 function. FRAME is the frame corresponding to the call to this
6075 function. SYM is the symbol of the function argument holding the
6076 target PC of the exception. */
6077
6078 static void
6079 insert_exception_resume_breakpoint (struct thread_info *tp,
6080 const struct block *b,
6081 struct frame_info *frame,
6082 struct symbol *sym)
6083 {
6084 TRY
6085 {
6086 struct symbol *vsym;
6087 struct value *value;
6088 CORE_ADDR handler;
6089 struct breakpoint *bp;
6090
6091 vsym = lookup_symbol (SYMBOL_LINKAGE_NAME (sym), b, VAR_DOMAIN, NULL);
6092 value = read_var_value (vsym, frame);
6093 /* If the value was optimized out, revert to the old behavior. */
6094 if (! value_optimized_out (value))
6095 {
6096 handler = value_as_address (value);
6097
6098 if (debug_infrun)
6099 fprintf_unfiltered (gdb_stdlog,
6100 "infrun: exception resume at %lx\n",
6101 (unsigned long) handler);
6102
6103 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
6104 handler, bp_exception_resume);
6105
6106 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
6107 frame = NULL;
6108
6109 bp->thread = tp->num;
6110 inferior_thread ()->control.exception_resume_breakpoint = bp;
6111 }
6112 }
6113 CATCH (e, RETURN_MASK_ERROR)
6114 {
6115 /* We want to ignore errors here. */
6116 }
6117 END_CATCH
6118 }
6119
6120 /* A helper for check_exception_resume that sets an
6121 exception-breakpoint based on a SystemTap probe. */
6122
6123 static void
6124 insert_exception_resume_from_probe (struct thread_info *tp,
6125 const struct bound_probe *probe,
6126 struct frame_info *frame)
6127 {
6128 struct value *arg_value;
6129 CORE_ADDR handler;
6130 struct breakpoint *bp;
6131
6132 arg_value = probe_safe_evaluate_at_pc (frame, 1);
6133 if (!arg_value)
6134 return;
6135
6136 handler = value_as_address (arg_value);
6137
6138 if (debug_infrun)
6139 fprintf_unfiltered (gdb_stdlog,
6140 "infrun: exception resume at %s\n",
6141 paddress (get_objfile_arch (probe->objfile),
6142 handler));
6143
6144 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
6145 handler, bp_exception_resume);
6146 bp->thread = tp->num;
6147 inferior_thread ()->control.exception_resume_breakpoint = bp;
6148 }
6149
6150 /* This is called when an exception has been intercepted. Check to
6151 see whether the exception's destination is of interest, and if so,
6152 set an exception resume breakpoint there. */
6153
static void
check_exception_resume (struct execution_control_state *ecs,
			struct frame_info *frame)
{
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.probe)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  /* No probe: fall back to reading the handler out of the debug
     hook's second formal argument.  */
  func = get_frame_function (frame);
  if (!func)
    return;

  TRY
    {
      const struct block *b;
      struct block_iterator iter;
      struct symbol *sym;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
	 the unwinder's debug hook, declared as:

	 void _Unwind_DebugHook (void *cfa, void *handler);

	 The CFA argument indicates the frame to which control is
	 about to be transferred.  HANDLER is the destination PC.

	 We ignore the CFA and set a temporary breakpoint at HANDLER.
	 This is not extremely efficient but it avoids issues in gdb
	 with computing the DWARF CFA, and it also works even in weird
	 cases such as throwing an exception from inside a signal
	 handler.  */

      b = SYMBOL_BLOCK_VALUE (func);
      ALL_BLOCK_SYMBOLS (b, iter, sym)
	{
	  if (!SYMBOL_IS_ARGUMENT (sym))
	    continue;

	  /* Skip the first argument (CFA); the second is HANDLER.  */
	  if (argno == 0)
	    ++argno;
	  else
	    {
	      insert_exception_resume_breakpoint (ecs->event_thread,
						  b, frame, sym);
	      break;
	    }
	}
    }
  CATCH (e, RETURN_MASK_ERROR)
    {
      /* Errors here are non-fatal: we simply don't get a resume
	 breakpoint.  */
    }
  END_CATCH
}
6218
6219 static void
6220 stop_waiting (struct execution_control_state *ecs)
6221 {
6222 if (debug_infrun)
6223 fprintf_unfiltered (gdb_stdlog, "infrun: stop_waiting\n");
6224
6225 clear_step_over_info ();
6226
6227 /* Let callers know we don't want to wait for the inferior anymore. */
6228 ecs->wait_some_more = 0;
6229 }
6230
6231 /* Called when we should continue running the inferior, because the
6232 current event doesn't cause a user visible stop. This does the
6233 resuming part; waiting for the next event is done elsewhere. */
6234
static void
keep_going (struct execution_control_state *ecs)
{
  /* Make sure normal_stop is called if we get a QUIT handled before
     reaching resume.  */
  struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc (get_thread_regcache (ecs->ptid));

  if (ecs->event_thread->control.trap_expected
      && ecs->event_thread->suspend.stop_signal != GDB_SIGNAL_TRAP)
    {
      /* We haven't yet gotten our trap, and either: intercepted a
	 non-signal event (e.g., a fork); or took a signal which we
	 are supposed to pass through to the inferior.  Simply
	 continue.  */
      discard_cleanups (old_cleanups);
      resume (ecs->event_thread->suspend.stop_signal);
    }
  else
    {
      struct regcache *regcache = get_current_regcache ();
      int remove_bp;
      int remove_wps;

      /* Either the trap was not expected, but we are continuing
	 anyway (if we got a signal, the user asked it be passed to
	 the child)
	 -- or --
	 We got our expected trap, but decided we should resume from
	 it.

	 We're going to run this baby now!

	 Note that insert_breakpoints won't try to re-insert
	 already inserted breakpoints.  Therefore, we don't
	 care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
	 displaced stepping to do so, insert all breakpoints
	 (watchpoints, etc.) but the one we're stepping over, step one
	 instruction, and then re-insert the breakpoint when that step
	 is finished.  */

      /* Do we need to remove a breakpoint / watchpoint in order to
	 step past it in place?  */
      remove_bp = (ecs->hit_singlestep_breakpoint
		   || thread_still_needs_step_over (ecs->event_thread));
      remove_wps = (ecs->event_thread->stepping_over_watchpoint
		    && !target_have_steppable_watchpoint);

      /* We can't use displaced stepping if we need to step past a
	 watchpoint.  The instruction copied to the scratch pad would
	 still trigger the watchpoint.  */
      if (remove_bp
	  && (remove_wps
	      || !use_displaced_stepping (get_regcache_arch (regcache))))
	{
	  set_step_over_info (get_regcache_aspace (regcache),
			      regcache_read_pc (regcache), remove_wps);
	}
      else if (remove_wps)
	set_step_over_info (NULL, 0, remove_wps);
      else
	clear_step_over_info ();

      /* Stop stepping if inserting breakpoints fails.  */
      TRY
	{
	  insert_breakpoints ();
	}
      CATCH (e, RETURN_MASK_ERROR)
	{
	  exception_print (gdb_stderr, e);
	  stop_waiting (ecs);
	  discard_cleanups (old_cleanups);
	  return;
	}
      END_CATCH

      /* We expect a SIGTRAP for the in-place step-over (breakpoint
	 or watchpoint).  */
      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      /* Do not deliver GDB_SIGNAL_TRAP (except when the user
	 explicitly specifies that such a signal should be delivered
	 to the target program).  Typically, that would occur when a
	 user is debugging a target monitor on a simulator: the target
	 monitor sets a breakpoint; the simulator encounters this
	 breakpoint and halts the simulation handing control to GDB;
	 GDB, noting that the stop address doesn't map to any known
	 breakpoint, returns control back to the simulator; the
	 simulator then delivers the hardware equivalent of a
	 GDB_SIGNAL_TRAP to the program being debugged.  */
      if (ecs->event_thread->suspend.stop_signal == GDB_SIGNAL_TRAP
	  && !signal_program[ecs->event_thread->suspend.stop_signal])
	ecs->event_thread->suspend.stop_signal = GDB_SIGNAL_0;

      discard_cleanups (old_cleanups);
      resume (ecs->event_thread->suspend.stop_signal);
    }

  prepare_to_wait (ecs);
}
6337
6338 /* This function normally comes after a resume, before
6339 handle_inferior_event exits. It takes care of any last bits of
6340 housekeeping, and sets the all-important wait_some_more flag. */
6341
6342 static void
6343 prepare_to_wait (struct execution_control_state *ecs)
6344 {
6345 if (debug_infrun)
6346 fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
6347
6348 /* This is the old end of the while loop. Let everybody know we
6349 want to wait for the inferior some more and get called again
6350 soon. */
6351 ecs->wait_some_more = 1;
6352 }
6353
6354 /* We are done with the step range of a step/next/si/ni command.
6355 Called once for each n of a "step n" operation. */
6356
6357 static void
6358 end_stepping_range (struct execution_control_state *ecs)
6359 {
6360 ecs->event_thread->control.stop_step = 1;
6361 stop_waiting (ecs);
6362 }
6363
6364 /* Several print_*_reason functions to print why the inferior has stopped.
6365 We always print something when the inferior exits, or receives a signal.
6366 The rest of the cases are dealt with later on in normal_stop and
6367 print_it_typical. Ideally there should be a call to one of these
6368 print_*_reason functions functions from handle_inferior_event each time
6369 stop_waiting is called.
6370
6371 Note that we don't call these directly, instead we delegate that to
6372 the interpreters, through observers. Interpreters then call these
6373 with whatever uiout is right. */
6374
6375 void
6376 print_end_stepping_range_reason (struct ui_out *uiout)
6377 {
6378 /* For CLI-like interpreters, print nothing. */
6379
6380 if (ui_out_is_mi_like_p (uiout))
6381 {
6382 ui_out_field_string (uiout, "reason",
6383 async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
6384 }
6385 }
6386
/* Notify the user that the inferior was terminated by signal SIGGNAL.
   An MI "reason" field is emitted first when UIOUT is MI-like; the
   textual message and the surrounding annotations are produced for
   all interpreters.  The order of annotate/ui_out calls is part of
   the annotation protocol -- do not reorder.  */

void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  if (ui_out_is_mi_like_p (uiout))
    ui_out_field_string
      (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  ui_out_text (uiout, "\nProgram terminated with signal ");
  annotate_signal_name ();
  ui_out_field_string (uiout, "signal-name",
		       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  ui_out_text (uiout, ", ");
  annotate_signal_string ();
  ui_out_field_string (uiout, "signal-meaning",
		       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  ui_out_text (uiout, ".\n");
  ui_out_text (uiout, "The program no longer exists.\n");
}
6407
6408 void
6409 print_exited_reason (struct ui_out *uiout, int exitstatus)
6410 {
6411 struct inferior *inf = current_inferior ();
6412 const char *pidstr = target_pid_to_str (pid_to_ptid (inf->pid));
6413
6414 annotate_exited (exitstatus);
6415 if (exitstatus)
6416 {
6417 if (ui_out_is_mi_like_p (uiout))
6418 ui_out_field_string (uiout, "reason",
6419 async_reason_lookup (EXEC_ASYNC_EXITED));
6420 ui_out_text (uiout, "[Inferior ");
6421 ui_out_text (uiout, plongest (inf->num));
6422 ui_out_text (uiout, " (");
6423 ui_out_text (uiout, pidstr);
6424 ui_out_text (uiout, ") exited with code ");
6425 ui_out_field_fmt (uiout, "exit-code", "0%o", (unsigned int) exitstatus);
6426 ui_out_text (uiout, "]\n");
6427 }
6428 else
6429 {
6430 if (ui_out_is_mi_like_p (uiout))
6431 ui_out_field_string
6432 (uiout, "reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
6433 ui_out_text (uiout, "[Inferior ");
6434 ui_out_text (uiout, plongest (inf->num));
6435 ui_out_text (uiout, " (");
6436 ui_out_text (uiout, pidstr);
6437 ui_out_text (uiout, ") exited normally]\n");
6438 }
6439 }
6440
/* Notify the user that the inferior received SIGGNAL.  For
   GDB_SIGNAL_0 on a CLI-like interpreter, only the stopped thread is
   announced; otherwise the signal's name and meaning are printed,
   bracketed by annotations, with an MI "reason" field when UIOUT is
   MI-like.  The interleaving of annotate and ui_out calls is part of
   the annotation protocol -- do not reorder.  */

void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signal ();

  if (siggnal == GDB_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
    {
      struct thread_info *t = inferior_thread ();

      ui_out_text (uiout, "\n[");
      ui_out_field_string (uiout, "thread-name",
			   target_pid_to_str (t->ptid));
      ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
      ui_out_text (uiout, " stopped");
    }
  else
    {
      ui_out_text (uiout, "\nProgram received signal ");
      annotate_signal_name ();
      if (ui_out_is_mi_like_p (uiout))
	ui_out_field_string
	  (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      ui_out_field_string (uiout, "signal-name",
			   gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      ui_out_text (uiout, ", ");
      annotate_signal_string ();
      ui_out_field_string (uiout, "signal-meaning",
			   gdb_signal_to_string (siggnal));
      annotate_signal_string_end ();
    }
  ui_out_text (uiout, ".\n");
}
6474
/* Notify the user that reverse execution ran out of recorded
   history.  */

void
print_no_history_reason (struct ui_out *uiout)
{
  ui_out_text (uiout, "\nNo more reverse-execution history.\n");
}
6480
/* Print current location without a level number, if we have changed
   functions or hit a breakpoint.  Print source line if we have one.
   bpstat_print contains the logic deciding in detail what to print,
   based on the event(s) that just occurred.  WS is the last reported
   target wait status.  */

void
print_stop_event (struct target_waitstatus *ws)
{
  int bpstat_ret;
  int source_flag;
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  /* Let the breakpoint machinery print whatever is appropriate for
     the stopped thread's bpstat; its return value tells us what is
     left for us to print here.  */
  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws->kind);
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
	 should) carry around the function and does (or should) use
	 that when doing a frame comparison.  */
      if (tp->control.stop_step
	  && frame_id_eq (tp->control.step_frame_id,
			  get_frame_id (get_current_frame ()))
	  && tp->control.step_start_function == find_pc_function (stop_pc))
	{
	  /* Finished step, just print source line.  */
	  source_flag = SRC_LINE;
	}
      else
	{
	  /* Print location and source line.  */
	  source_flag = SRC_AND_LOC;
	}
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (__FILE__, __LINE__, _("Unknown value."));
    }

  /* The behavior of this routine with respect to the source
     flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);

  /* Display the auto-display expressions.  */
  do_displays ();
}
6542
/* Here to return control to GDB when the inferior stops for real.
   Print appropriate messages, remove breakpoints, give terminal our
   modes, and finally notify observers of the stop.

   STOP_PRINT_FRAME nonzero means print the executing frame
   (pc, function, args, file, line number and line text).  */

void
normal_stop (void)
{
  struct target_waitstatus last;
  ptid_t last_ptid;
  struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);

  /* Fetch the last reported event; it drives most of the decisions
     below about what to print and notify.  */
  get_last_target_status (&last_ptid, &last);

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */
  if (!non_stop)
    make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
  else if (last.kind != TARGET_WAITKIND_SIGNALLED
	   && last.kind != TARGET_WAITKIND_EXITED
	   && last.kind != TARGET_WAITKIND_NO_RESUMED)
    make_cleanup (finish_thread_state_cleanup, &inferior_ptid);

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    observer_notify_signal_received (inferior_thread ()->suspend.stop_signal);

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop
      && !ptid_equal (previous_inferior_ptid, inferior_ptid)
      && target_has_execution
      && last.kind != TARGET_WAITKIND_SIGNALLED
      && last.kind != TARGET_WAITKIND_EXITED
      && last.kind != TARGET_WAITKIND_NO_RESUMED)
    {
      target_terminal_ours_for_output ();
      printf_filtered (_("[Switching to %s]\n"),
		       target_pid_to_str (inferior_ptid));
      annotate_thread_changed ();
      previous_inferior_ptid = inferior_ptid;
    }

  if (last.kind == TARGET_WAITKIND_NO_RESUMED)
    {
      gdb_assert (sync_execution || !target_can_async_p ());

      target_terminal_ours_for_output ();
      printf_filtered (_("No unwaited-for children left.\n"));
    }

  /* Note: this depends on the update_thread_list call above.  */
  if (!breakpoints_should_be_inserted_now () && target_has_execution)
    {
      if (remove_breakpoints ())
	{
	  target_terminal_ours_for_output ();
	  printf_filtered (_("Cannot remove breakpoints because "
			     "program is no longer writable.\nFurther "
			     "execution is probably impossible.\n"));
	}
    }

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  /* Notify observers if we finished a "step"-like command, etc.  */
  if (target_has_execution
      && last.kind != TARGET_WAITKIND_SIGNALLED
      && last.kind != TARGET_WAITKIND_EXITED
      && inferior_thread ()->control.stop_step)
    {
      /* But not if in the middle of doing a "step n" operation for
	 n > 1 */
      if (inferior_thread ()->step_multi)
	goto done;

      observer_notify_end_stepping_range ();
    }

  target_terminal_ours ();
  async_enable_stdin ();

  /* Set the current source location.  This will also happen if we
     display the frame below, but the current SAL will be incorrect
     during a user hook-stop function.  */
  if (has_stack_frames () && !stop_stack_dummy)
    set_current_sal_from_frame (get_current_frame ());

  /* Let the user/frontend see the threads as stopped, but do nothing
     if the thread was running an infcall.  We may be e.g., evaluating
     a breakpoint condition.  In that case, the thread had state
     THREAD_RUNNING before the infcall, and shall remain set to
     running, all without informing the user/frontend about state
     transition changes.  If this is actually a call command, then the
     thread was originally already stopped, so there's no state to
     finish either.  */
  if (target_has_execution && inferior_thread ()->control.in_infcall)
    discard_cleanups (old_chain);
  else
    do_cleanups (old_chain);

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  if (stop_command)
    catch_errors (hook_stop_stub, stop_command,
		  "Error while running hook_stop:\n", RETURN_MASK_ALL);

  if (!has_stack_frames ())
    goto done;

  if (last.kind == TARGET_WAITKIND_SIGNALLED
      || last.kind == TARGET_WAITKIND_EXITED)
    goto done;

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.
     Don't do this on return from a stack dummy routine,
     or if the program has exited.  */

  if (!stop_stack_dummy)
    {
      select_frame (get_current_frame ());

      /* If --batch-silent is enabled then there's no need to print the current
	 source location, and to try risks causing an error message about
	 missing source files.  */
      if (stop_print_frame && !batch_silent)
	print_stop_event (&last);
    }

  /* Save the function value return registers, if we care.
     We might be about to restore their previous contents.  */
  if (inferior_thread ()->control.proceed_to_finish
      && execution_direction != EXEC_REVERSE)
    {
      /* This should not be necessary.  */
      if (stop_registers)
	regcache_xfree (stop_registers);

      /* NB: The copy goes through to the target picking up the value of
	 all the registers.  */
      stop_registers = regcache_dup (get_current_regcache ());
    }

  if (stop_stack_dummy == STOP_STACK_DUMMY)
    {
      /* Pop the empty frame that contains the stack dummy.
	 This also restores inferior state prior to the call
	 (struct infcall_suspend_state).  */
      struct frame_info *frame = get_current_frame ();

      gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
      frame_pop (frame);
      /* frame_pop() calls reinit_frame_cache as the last thing it
	 does which means there's currently no selected frame.  We
	 don't need to re-establish a selected frame if the dummy call
	 returns normally, that will be done by
	 restore_infcall_control_state.  However, we do have to handle
	 the case where the dummy call is returning after being
	 stopped (e.g. the dummy call previously hit a breakpoint).
	 We can't know which case we have so just always re-establish
	 a selected frame here.  */
      select_frame (get_current_frame ());
    }

done:
  annotate_stopped ();

  /* Suppress the stop observer if we're in the middle of:

     - a step n (n > 1), as there still more steps to be done.

     - a "finish" command, as the observer will be called in
     finish_command_continuation, so it can include the inferior
     function's return value.

     - calling an inferior function, as we pretend the inferior didn't
     run at all.  The return value of the call is handled by the
     expression evaluator, through call_function_by_hand.  */

  if (!target_has_execution
      || last.kind == TARGET_WAITKIND_SIGNALLED
      || last.kind == TARGET_WAITKIND_EXITED
      || last.kind == TARGET_WAITKIND_NO_RESUMED
      || (!(inferior_thread ()->step_multi
	    && inferior_thread ()->control.stop_step)
	  && !(inferior_thread ()->control.stop_bpstat
	       && inferior_thread ()->control.proceed_to_finish)
	  && !inferior_thread ()->control.in_infcall))
    {
      if (!ptid_equal (inferior_ptid, null_ptid))
	observer_notify_normal_stop (inferior_thread ()->control.stop_bpstat,
				     stop_print_frame);
      else
	observer_notify_normal_stop (NULL, stop_print_frame);
    }

  if (target_has_execution)
    {
      if (last.kind != TARGET_WAITKIND_SIGNALLED
	  && last.kind != TARGET_WAITKIND_EXITED)
	/* Delete the breakpoint we stopped at, if it wants to be deleted.
	   Delete any breakpoint that is to be deleted at the next stop.  */
	breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  /* Try to get rid of automatically added inferiors that are no
     longer needed.  Keeping those around slows down things linearly.
     Note that this never removes the current inferior.  */
  prune_inferiors ();
}
6786
/* Helper for catch_errors: run the pre-hook of CMD, the stop
   command.  Always returns 0 (success) to catch_errors.  */

static int
hook_stop_stub (void *cmd)
{
  execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
  return 0;
}
6793 \f
6794 int
6795 signal_stop_state (int signo)
6796 {
6797 return signal_stop[signo];
6798 }
6799
6800 int
6801 signal_print_state (int signo)
6802 {
6803 return signal_print[signo];
6804 }
6805
6806 int
6807 signal_pass_state (int signo)
6808 {
6809 return signal_program[signo];
6810 }
6811
6812 static void
6813 signal_cache_update (int signo)
6814 {
6815 if (signo == -1)
6816 {
6817 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
6818 signal_cache_update (signo);
6819
6820 return;
6821 }
6822
6823 signal_pass[signo] = (signal_stop[signo] == 0
6824 && signal_print[signo] == 0
6825 && signal_program[signo] == 1
6826 && signal_catch[signo] == 0);
6827 }
6828
6829 int
6830 signal_stop_update (int signo, int state)
6831 {
6832 int ret = signal_stop[signo];
6833
6834 signal_stop[signo] = state;
6835 signal_cache_update (signo);
6836 return ret;
6837 }
6838
6839 int
6840 signal_print_update (int signo, int state)
6841 {
6842 int ret = signal_print[signo];
6843
6844 signal_print[signo] = state;
6845 signal_cache_update (signo);
6846 return ret;
6847 }
6848
6849 int
6850 signal_pass_update (int signo, int state)
6851 {
6852 int ret = signal_program[signo];
6853
6854 signal_program[signo] = state;
6855 signal_cache_update (signo);
6856 return ret;
6857 }
6858
6859 /* Update the global 'signal_catch' from INFO and notify the
6860 target. */
6861
6862 void
6863 signal_catch_update (const unsigned int *info)
6864 {
6865 int i;
6866
6867 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
6868 signal_catch[i] = info[i] > 0;
6869 signal_cache_update (-1);
6870 target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
6871 }
6872
/* Print the column headers shared by "info signals" and the "handle"
   command's result table.  */

static void
sig_print_header (void)
{
  printf_filtered (_("Signal Stop\tPrint\tPass to program\tDescription\n"));
}
6879
6880 static void
6881 sig_print_info (enum gdb_signal oursig)
6882 {
6883 const char *name = gdb_signal_to_name (oursig);
6884 int name_padding = 13 - strlen (name);
6885
6886 if (name_padding <= 0)
6887 name_padding = 0;
6888
6889 printf_filtered ("%s", name);
6890 printf_filtered ("%*.*s ", name_padding, name_padding, " ");
6891 printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
6892 printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
6893 printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
6894 printf_filtered ("%s\n", gdb_signal_to_string (oursig));
6895 }
6896
/* Specify how various signals in the inferior should be handled.
   Implements the "handle" command: ARGS is a whitespace-separated mix
   of signal names, internal signal numbers, numeric ranges written as
   <LOW>-<HIGH>, and action keywords (stop/nostop, print/noprint,
   pass/nopass, ignore/noignore, all); actions accumulate over all
   signals mentioned so far.  FROM_TTY nonzero means show the
   resulting table afterwards.  */

static void
handle_command (char *args, int from_tty)
{
  char **argv;
  int digits, wordlen;
  int sigfirst, signum, siglast;
  enum gdb_signal oursig;
  int allsigs;
  int nsigs;
  unsigned char *sigs;		/* Flag per signal: an action touched it.  */
  struct cleanup *old_chain;

  if (args == NULL)
    {
      error_no_arg (_("signal to handle"));
    }

  /* Allocate and zero an array of flags for which signals to handle.  */

  nsigs = (int) GDB_SIGNAL_LAST;
  sigs = (unsigned char *) alloca (nsigs);
  memset (sigs, 0, nsigs);

  /* Break the command line up into args.  */

  argv = gdb_buildargv (args);
  old_chain = make_cleanup_freeargv (argv);

  /* Walk through the args, looking for signal oursigs, signal names, and
     actions.  Signal numbers and signal names may be interspersed with
     actions, with the actions being performed for all signals cumulatively
     specified.  Signal ranges can be specified as <LOW>-<HIGH>.  */

  while (*argv != NULL)
    {
      wordlen = strlen (*argv);
      /* Count leading digits; a nonzero count means the word looks
	 numeric.  */
      for (digits = 0; isdigit ((*argv)[digits]); digits++)
	{;
	}
      allsigs = 0;
      sigfirst = siglast = -1;

      /* NOTE(review): SET_SIGS/UNSET_SIGS are macros defined earlier
	 in this file (outside this view); they appear to mark in SIGS
	 the signals whose disposition the keyword changes -- confirm
	 at the macro definitions.  */
      if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
	{
	  /* Apply action to all signals except those used by the
	     debugger.  Silently skip those.  */
	  allsigs = 1;
	  sigfirst = 0;
	  siglast = nsigs - 1;
	}
      else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_stop);
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_print);
	}
      else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
	{
	  SET_SIGS (nsigs, sigs, signal_program);
	}
      else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_print);
	  UNSET_SIGS (nsigs, sigs, signal_stop);
	}
      else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
	{
	  UNSET_SIGS (nsigs, sigs, signal_program);
	}
      else if (digits > 0)
	{
	  /* It is numeric.  The numeric signal refers to our own
	     internal signal numbering from target.h, not to host/target
	     signal number.  This is a feature; users really should be
	     using symbolic names anyway, and the common ones like
	     SIGHUP, SIGINT, SIGALRM, etc. will work right anyway.  */

	  sigfirst = siglast = (int)
	    gdb_signal_from_command (atoi (*argv));
	  if ((*argv)[digits] == '-')
	    {
	      siglast = (int)
		gdb_signal_from_command (atoi ((*argv) + digits + 1));
	    }
	  if (sigfirst > siglast)
	    {
	      /* Bet he didn't figure we'd think of this case...  */
	      signum = sigfirst;
	      sigfirst = siglast;
	      siglast = signum;
	    }
	}
      else
	{
	  oursig = gdb_signal_from_name (*argv);
	  if (oursig != GDB_SIGNAL_UNKNOWN)
	    {
	      sigfirst = siglast = (int) oursig;
	    }
	  else
	    {
	      /* Not a number and not a recognized flag word => complain.  */
	      error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
	    }
	}

      /* If any signal numbers or symbol names were found, set flags for
         which signals to apply actions to.  */

      for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
	{
	  switch ((enum gdb_signal) signum)
	    {
	    case GDB_SIGNAL_TRAP:
	    case GDB_SIGNAL_INT:
	      /* These two are used by GDB itself; changing them needs
		 explicit confirmation unless already flagged or part
		 of "all".  */
	      if (!allsigs && !sigs[signum])
		{
		  if (query (_("%s is used by the debugger.\n\
Are you sure you want to change it? "),
			     gdb_signal_to_name ((enum gdb_signal) signum)))
		    {
		      sigs[signum] = 1;
		    }
		  else
		    {
		      printf_unfiltered (_("Not confirmed, unchanged.\n"));
		      gdb_flush (gdb_stdout);
		    }
		}
	      break;
	    case GDB_SIGNAL_0:
	    case GDB_SIGNAL_DEFAULT:
	    case GDB_SIGNAL_UNKNOWN:
	      /* Make sure that "all" doesn't print these.  */
	      break;
	    default:
	      sigs[signum] = 1;
	      break;
	    }
	}

      argv++;
    }

  /* If anything was flagged, refresh the pass cache once, push the
     new dispositions to the target, and optionally show the result
     starting from the first flagged signal.  */
  for (signum = 0; signum < nsigs; signum++)
    if (sigs[signum])
      {
	signal_cache_update (-1);
	target_pass_signals ((int) GDB_SIGNAL_LAST, signal_pass);
	target_program_signals ((int) GDB_SIGNAL_LAST, signal_program);

	if (from_tty)
	  {
	    /* Show the results.  */
	    sig_print_header ();
	    for (; signum < nsigs; signum++)
	      if (sigs[signum])
		sig_print_info (signum);
	  }

	break;
      }

  do_cleanups (old_chain);
}
7079
7080 /* Complete the "handle" command. */
7081
7082 static VEC (char_ptr) *
7083 handle_completer (struct cmd_list_element *ignore,
7084 const char *text, const char *word)
7085 {
7086 VEC (char_ptr) *vec_signals, *vec_keywords, *return_val;
7087 static const char * const keywords[] =
7088 {
7089 "all",
7090 "stop",
7091 "ignore",
7092 "print",
7093 "pass",
7094 "nostop",
7095 "noignore",
7096 "noprint",
7097 "nopass",
7098 NULL,
7099 };
7100
7101 vec_signals = signal_completer (ignore, text, word);
7102 vec_keywords = complete_on_enum (keywords, word, word);
7103
7104 return_val = VEC_merge (char_ptr, vec_signals, vec_keywords);
7105 VEC_free (char_ptr, vec_signals);
7106 VEC_free (char_ptr, vec_keywords);
7107 return return_val;
7108 }
7109
7110 enum gdb_signal
7111 gdb_signal_from_command (int num)
7112 {
7113 if (num >= 1 && num <= 15)
7114 return (enum gdb_signal) num;
7115 error (_("Only signals 1-15 are valid as numeric signals.\n\
7116 Use \"info signals\" for a list of symbolic signals."));
7117 }
7118
7119 /* Print current contents of the tables set by the handle command.
7120 It is possible we should just be printing signals actually used
7121 by the current target (but for things to work right when switching
7122 targets, all signals should be in the signal tables). */
7123
7124 static void
7125 signals_info (char *signum_exp, int from_tty)
7126 {
7127 enum gdb_signal oursig;
7128
7129 sig_print_header ();
7130
7131 if (signum_exp)
7132 {
7133 /* First see if this is a symbol name. */
7134 oursig = gdb_signal_from_name (signum_exp);
7135 if (oursig == GDB_SIGNAL_UNKNOWN)
7136 {
7137 /* No, try numeric. */
7138 oursig =
7139 gdb_signal_from_command (parse_and_eval_long (signum_exp));
7140 }
7141 sig_print_info (oursig);
7142 return;
7143 }
7144
7145 printf_filtered ("\n");
7146 /* These ugly casts brought to you by the native VAX compiler. */
7147 for (oursig = GDB_SIGNAL_FIRST;
7148 (int) oursig < (int) GDB_SIGNAL_LAST;
7149 oursig = (enum gdb_signal) ((int) oursig + 1))
7150 {
7151 QUIT;
7152
7153 if (oursig != GDB_SIGNAL_UNKNOWN
7154 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
7155 sig_print_info (oursig);
7156 }
7157
7158 printf_filtered (_("\nUse the \"handle\" command "
7159 "to change these tables.\n"));
7160 }
7161
/* Check if it makes sense to read $_siginfo from the current thread
   at this point.  If not, throw an error.  The checks are ordered
   from "no thread at all" to "thread exists but is unusable".  */

static void
validate_siginfo_access (void)
{
  /* No current inferior, no siginfo.  */
  if (ptid_equal (inferior_ptid, null_ptid))
    error (_("No thread selected."));

  /* Don't try to read from a dead thread.  */
  if (is_exited (inferior_ptid))
    error (_("The current thread has terminated"));

  /* ... or from a spinning thread.  */
  if (is_running (inferior_ptid))
    error (_("Selected thread is running."));
}
7180
7181 /* The $_siginfo convenience variable is a bit special. We don't know
7182 for sure the type of the value until we actually have a chance to
7183 fetch the data. The type can change depending on gdbarch, so it is
7184 also dependent on which thread you have selected.
7185
7186 1. making $_siginfo be an internalvar that creates a new value on
7187 access.
7188
7189 2. making the value of $_siginfo be an lval_computed value. */
7190
7191 /* This function implements the lval_computed support for reading a
7192 $_siginfo value. */
7193
7194 static void
7195 siginfo_value_read (struct value *v)
7196 {
7197 LONGEST transferred;
7198
7199 validate_siginfo_access ();
7200
7201 transferred =
7202 target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
7203 NULL,
7204 value_contents_all_raw (v),
7205 value_offset (v),
7206 TYPE_LENGTH (value_type (v)));
7207
7208 if (transferred != TYPE_LENGTH (value_type (v)))
7209 error (_("Unable to read siginfo"));
7210 }
7211
7212 /* This function implements the lval_computed support for writing a
7213 $_siginfo value. */
7214
7215 static void
7216 siginfo_value_write (struct value *v, struct value *fromval)
7217 {
7218 LONGEST transferred;
7219
7220 validate_siginfo_access ();
7221
7222 transferred = target_write (&current_target,
7223 TARGET_OBJECT_SIGNAL_INFO,
7224 NULL,
7225 value_contents_all_raw (fromval),
7226 value_offset (v),
7227 TYPE_LENGTH (value_type (fromval)));
7228
7229 if (transferred != TYPE_LENGTH (value_type (fromval)))
7230 error (_("Unable to write siginfo"));
7231 }
7232
/* Read/write callbacks backing the lval_computed $_siginfo value.  */

static const struct lval_funcs siginfo_value_funcs =
{
  siginfo_value_read,
  siginfo_value_write
};
7238
7239 /* Return a new value with the correct type for the siginfo object of
7240 the current thread using architecture GDBARCH. Return a void value
7241 if there's no object available. */
7242
7243 static struct value *
7244 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
7245 void *ignore)
7246 {
7247 if (target_has_stack
7248 && !ptid_equal (inferior_ptid, null_ptid)
7249 && gdbarch_get_siginfo_type_p (gdbarch))
7250 {
7251 struct type *type = gdbarch_get_siginfo_type (gdbarch);
7252
7253 return allocate_computed_value (type, &siginfo_value_funcs, NULL);
7254 }
7255
7256 return allocate_value (builtin_type (gdbarch)->builtin_void);
7257 }
7258
7259 \f
/* infcall_suspend_state contains state about the program itself like its
   registers and any signal it received when it last stopped.
   This state must be restored regardless of how the inferior function call
   ends (either successfully, or after it hits a breakpoint or signal)
   if the program is to properly continue where it left off.  */

struct infcall_suspend_state
{
  struct thread_suspend_state thread_suspend;
#if 0 /* Currently unused and empty structures are not valid C.  */
  struct inferior_suspend_state inferior_suspend;
#endif

  /* Other fields:  */

  /* PC the thread had stopped at when this state was saved.  */
  CORE_ADDR stop_pc;

  /* Full copy of the register cache taken at save time; owned by this
     object and freed by discard_infcall_suspend_state.  */
  struct regcache *registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *siginfo_gdbarch;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For different gdbarch the
     content would be invalid.  */
  gdb_byte *siginfo_data;
};
7285
/* Save the current thread's suspend state (stop signal, stop PC,
   registers, and -- when the architecture provides a siginfo type --
   the raw signal info) for restoration after an inferior function
   call.  The returned object is owned by the caller and must be
   released with restore_infcall_suspend_state or
   discard_infcall_suspend_state.  As a side effect, clears the
   thread's pending stop signal.  */

struct infcall_suspend_state *
save_infcall_suspend_state (void)
{
  struct infcall_suspend_state *inf_state;
  struct thread_info *tp = inferior_thread ();
#if 0
  struct inferior *inf = current_inferior ();
#endif
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  gdb_byte *siginfo_data = NULL;

  if (gdbarch_get_siginfo_type_p (gdbarch))
    {
      struct type *type = gdbarch_get_siginfo_type (gdbarch);
      size_t len = TYPE_LENGTH (type);
      struct cleanup *back_to;

      siginfo_data = xmalloc (len);
      back_to = make_cleanup (xfree, siginfo_data);

      if (target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
		       siginfo_data, 0, len) == len)
	discard_cleanups (back_to);
      else
	{
	  /* Errors ignored.  */
	  do_cleanups (back_to);
	  siginfo_data = NULL;
	}
    }

  inf_state = XCNEW (struct infcall_suspend_state);

  if (siginfo_data)
    {
      inf_state->siginfo_gdbarch = gdbarch;
      inf_state->siginfo_data = siginfo_data;
    }

  inf_state->thread_suspend = tp->suspend;
#if 0 /* Currently unused and empty structures are not valid C.  */
  inf_state->inferior_suspend = inf->suspend;
#endif

  /* run_inferior_call will not use the signal due to its `proceed' call with
     GDB_SIGNAL_0 anyway.  */
  tp->suspend.stop_signal = GDB_SIGNAL_0;

  inf_state->stop_pc = stop_pc;

  inf_state->registers = regcache_dup (regcache);

  return inf_state;
}
7341
/* Restore inferior session state to INF_STATE: the saved thread
   suspend state, stop PC, siginfo (only when the current architecture
   matches the one the state was saved under), and registers.  Frees
   INF_STATE before returning.  */

void
restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  struct thread_info *tp = inferior_thread ();
#if 0
  struct inferior *inf = current_inferior ();
#endif
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  tp->suspend = inf_state->thread_suspend;
#if 0 /* Currently unused and empty structures are not valid C.  */
  inf->suspend = inf_state->inferior_suspend;
#endif

  stop_pc = inf_state->stop_pc;

  if (inf_state->siginfo_gdbarch == gdbarch)
    {
      struct type *type = gdbarch_get_siginfo_type (gdbarch);

      /* Errors ignored.  */
      target_write (&current_target, TARGET_OBJECT_SIGNAL_INFO, NULL,
		    inf_state->siginfo_data, 0, TYPE_LENGTH (type));
    }

  /* The inferior can be gone if the user types "print exit(0)"
     (and perhaps other times).  */
  if (target_has_execution)
    /* NB: The register write goes through to the target.  */
    regcache_cpy (regcache, inf_state->registers);

  discard_infcall_suspend_state (inf_state);
}
7378
/* Cleanup wrapper: restore (and thereby free) the
   infcall_suspend_state passed as STATE.  */

static void
do_restore_infcall_suspend_state_cleanup (void *state)
{
  restore_infcall_suspend_state ((struct infcall_suspend_state *) state);
}
7384
7385 struct cleanup *
7386 make_cleanup_restore_infcall_suspend_state
7387 (struct infcall_suspend_state *inf_state)
7388 {
7389 return make_cleanup (do_restore_infcall_suspend_state_cleanup, inf_state);
7390 }
7391
7392 void
7393 discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
7394 {
7395 regcache_xfree (inf_state->registers);
7396 xfree (inf_state->siginfo_data);
7397 xfree (inf_state);
7398 }
7399
/* Return the register cache saved inside INF_STATE.  The caller does
   not own the result; it stays valid only until INF_STATE is restored
   or discarded.  */

struct regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers;
}
7405
/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  /* Per-thread stepping/continuation control state at save time.  */
  struct thread_control_state thread_control;
  /* Per-inferior control state at save time.  */
  struct inferior_control_state inferior_control;

  /* Other fields: snapshots of the corresponding file-scope globals.  */
  enum stop_stack_kind stop_stack_dummy;
  int stopped_by_random_signal;
  int stop_after_trap;

  /* ID of the selected frame when the inferior function call was made.  */
  struct frame_id selected_frame_id;
};
7423
/* Save all of the information associated with the inferior<==>gdb
   connection.  The returned state must eventually be passed to either
   restore_infcall_control_state or discard_infcall_control_state.  */

struct infcall_control_state *
save_infcall_control_state (void)
{
  struct infcall_control_state *inf_status = xmalloc (sizeof (*inf_status));
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  inf_status->thread_control = tp->control;
  inf_status->inferior_control = inf->control;

  /* The struct copies above kept the breakpoint pointers in
     INF_STATUS; clear them in TP so the thread no longer references
     them while the call runs.  */
  tp->control.step_resume_breakpoint = NULL;
  tp->control.exception_resume_breakpoint = NULL;

  /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
     chain.  If caller's caller is walking the chain, they'll be happier if we
     hand them back the original chain when restore_infcall_control_state is
     called.  */
  tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);

  /* Other fields: snapshot the file-scope globals.  */
  inf_status->stop_stack_dummy = stop_stack_dummy;
  inf_status->stopped_by_random_signal = stopped_by_random_signal;
  inf_status->stop_after_trap = stop_after_trap;

  inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));

  return inf_status;
}
7455
7456 static int
7457 restore_selected_frame (void *args)
7458 {
7459 struct frame_id *fid = (struct frame_id *) args;
7460 struct frame_info *frame;
7461
7462 frame = frame_find_by_id (*fid);
7463
7464 /* If inf_status->selected_frame_id is NULL, there was no previously
7465 selected frame. */
7466 if (frame == NULL)
7467 {
7468 warning (_("Unable to restore previously selected frame."));
7469 return 0;
7470 }
7471
7472 select_frame (frame);
7473
7474 return (1);
7475 }
7476
/* Restore inferior session state to INF_STATUS.  INF_STATUS is freed
   on return; the caller must not use it afterwards.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* Breakpoints currently held by TP are about to be replaced by the
     saved copies below; marking them disp_del_at_next_stop schedules
     their deletion at the next stop.  */
  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields: write the snapshots back to the globals.  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;
  stop_after_trap = inf_status->stop_after_trap;

  if (target_has_stack)
    {
      /* The point of catch_errors is that if the stack is clobbered,
         walking the stack might encounter a garbage pointer and
         error() trying to dereference it.  */
      if (catch_errors
	  (restore_selected_frame, &inf_status->selected_frame_id,
	   "Unable to restore previously selected frame:\n",
	   RETURN_MASK_ERROR) == 0)
	/* Error in restoring the selected frame.  Select the innermost
	   frame.  */
	select_frame (get_current_frame ());
    }

  xfree (inf_status);
}
7519
/* Cleanup-callback adapter: forward the opaque pointer to
   restore_infcall_control_state.  */

static void
do_restore_infcall_control_state_cleanup (void *sts)
{
  struct infcall_control_state *inf_status = sts;

  restore_infcall_control_state (inf_status);
}
7525
7526 struct cleanup *
7527 make_cleanup_restore_infcall_control_state
7528 (struct infcall_control_state *inf_status)
7529 {
7530 return make_cleanup (do_restore_infcall_control_state_cleanup, inf_status);
7531 }
7532
/* Free INF_STATUS without restoring any of it.  The breakpoints it
   holds are marked disp_del_at_next_stop, which schedules their
   deletion at the next stop rather than leaking them.  */

void
discard_infcall_control_state (struct infcall_control_state *inf_status)
{
  if (inf_status->thread_control.step_resume_breakpoint)
    inf_status->thread_control.step_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  if (inf_status->thread_control.exception_resume_breakpoint)
    inf_status->thread_control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* See save_infcall_control_state for info on stop_bpstat.  */
  bpstat_clear (&inf_status->thread_control.stop_bpstat);

  xfree (inf_status);
}
7549 \f
7550 /* restore_inferior_ptid() will be used by the cleanup machinery
7551 to restore the inferior_ptid value saved in a call to
7552 save_inferior_ptid(). */
7553
7554 static void
7555 restore_inferior_ptid (void *arg)
7556 {
7557 ptid_t *saved_ptid_ptr = arg;
7558
7559 inferior_ptid = *saved_ptid_ptr;
7560 xfree (arg);
7561 }
7562
7563 /* Save the value of inferior_ptid so that it may be restored by a
7564 later call to do_cleanups(). Returns the struct cleanup pointer
7565 needed for later doing the cleanup. */
7566
7567 struct cleanup *
7568 save_inferior_ptid (void)
7569 {
7570 ptid_t *saved_ptid_ptr;
7571
7572 saved_ptid_ptr = xmalloc (sizeof (ptid_t));
7573 *saved_ptid_ptr = inferior_ptid;
7574 return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
7575 }
7576
/* See infrun.h.  */

void
clear_exit_convenience_vars (void)
{
  /* Reset both exit-related convenience variables so stale values from
     a previous inferior exit are not visible.  */
  clear_internalvar (lookup_internalvar ("_exitcode"));
  clear_internalvar (lookup_internalvar ("_exitsignal"));
}
7585 \f
7586
/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (returns error unless target implements to_set_exec_direction method).  */

/* Current direction of execution: EXEC_FORWARD or EXEC_REVERSE.  */
int execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
/* String variable edited by the "set exec-direction" enum command.  */
static const char *exec_direction = exec_forward;
/* Valid values for "set exec-direction"; NULL-terminated as required
   by add_setshow_enum_cmd.  */
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  NULL
};
7600
7601 static void
7602 set_exec_direction_func (char *args, int from_tty,
7603 struct cmd_list_element *cmd)
7604 {
7605 if (target_can_execute_reverse)
7606 {
7607 if (!strcmp (exec_direction, exec_forward))
7608 execution_direction = EXEC_FORWARD;
7609 else if (!strcmp (exec_direction, exec_reverse))
7610 execution_direction = EXEC_REVERSE;
7611 }
7612 else
7613 {
7614 exec_direction = exec_forward;
7615 error (_("Target does not support this operation."));
7616 }
7617 }
7618
7619 static void
7620 show_exec_direction_func (struct ui_file *out, int from_tty,
7621 struct cmd_list_element *cmd, const char *value)
7622 {
7623 switch (execution_direction) {
7624 case EXEC_FORWARD:
7625 fprintf_filtered (out, _("Forward.\n"));
7626 break;
7627 case EXEC_REVERSE:
7628 fprintf_filtered (out, _("Reverse.\n"));
7629 break;
7630 default:
7631 internal_error (__FILE__, __LINE__,
7632 _("bogus execution_direction value: %d"),
7633 (int) execution_direction);
7634 }
7635 }
7636
/* Implementation of "show schedule-multiple".  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Resuming the execution of threads "
		      "of all processes is %s.\n"),
		    value);
}
7644
/* Implementation of `siginfo' variable.  Only the make-value hook is
   provided; the remaining hooks (presumably compile-to-ax and destroy
   -- confirm against struct internalvar_funcs) are unused.  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  NULL,
  NULL
};
7653
/* Module initializer: register infrun's commands, set/show variables,
   signal-handling tables, observers, and the $_siginfo convenience
   variable.  Called once at GDB startup by init.c's generated code.  */

void
_initialize_infrun (void)
{
  int i;
  int numsigs;
  struct cmd_list_element *c;

  /* "info signals" / "info handle".  */
  add_info ("signals", signals_info, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", "signals", 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  /* The "stop" hook command is not registered in dbx mode.  */
  if (!dbx_commands)
    stop_command = add_cmd ("stop", class_obscure,
			    not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  /* Maintenance debug knobs.  */
  add_setshow_zuinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
Set inferior debugging."), _("\
Show inferior debugging."), _("\
When non-zero, inferior specific debugging is enabled."),
			     NULL,
			     show_debug_infrun,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("displaced", class_maintenance,
			   &debug_displaced, _("\
Set displaced stepping debugging."), _("\
Show displaced stepping debugging."), _("\
When non-zero, displaced stepping specific debugging is enabled."),
			   NULL,
			   show_debug_displaced,
			   &setdebuglist, &showdebuglist);

  /* Execution-mode settings.  */
  add_setshow_boolean_cmd ("non-stop", no_class,
			   &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
			   set_non_stop,
			   show_non_stop,
			   &setlist,
			   &showlist);

  /* Allocate and initialize the per-signal policy tables, one entry
     per GDB signal number.  */
  numsigs = (int) GDB_SIGNAL_LAST;
  signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
  signal_print = (unsigned char *)
    xmalloc (sizeof (signal_print[0]) * numsigs);
  signal_program = (unsigned char *)
    xmalloc (sizeof (signal_program[0]) * numsigs);
  signal_catch = (unsigned char *)
    xmalloc (sizeof (signal_catch[0]) * numsigs);
  signal_pass = (unsigned char *)
    xmalloc (sizeof (signal_pass[0]) * numsigs);
  /* Default: stop, print, and pass every signal; catch none.  */
  for (i = 0; i < numsigs; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions
     should not be given to the program afterwards.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
			    &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
			    set_stop_on_solib_events,
			    show_stop_on_solib_events,
			    &setlist, &showlist);

  /* Fork/exec following behavior.  */
  add_setshow_enum_cmd ("follow-fork-mode", class_run,
			follow_fork_mode_kind_names,
			&follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
			NULL,
			show_follow_fork_mode_string,
			&setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
			follow_exec_mode_names,
			&follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
			NULL,
			show_follow_exec_mode_string,
			&setlist, &showlist);

  /* Thread scheduling settings.  */
  add_setshow_enum_cmd ("scheduler-locking", class_run,
			scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off  == no locking (threads may preempt at any time)\n\
on   == full locking (no thread except the current thread may run)\n\
step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
	In this mode, other threads may run during other commands."),
			set_schedlock_func,	/* traps on target vector */
			show_scheduler_mode,
			&setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
			   NULL,
			   show_schedule_multiple,
			   &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function. Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
			   NULL,
			   show_step_stop_if_no_debug,
			   &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
				&can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
				NULL,
				show_can_use_displaced_stepping,
				&setlist, &showlist);

  /* Reverse-execution direction.  */
  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
			&exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
			_("Show direction of execution (forward/reverse)."),
			_("Tells gdb whether to execute forward or backward."),
			set_exec_direction_func, show_exec_direction_func,
			&setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
			   NULL, NULL, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  /* Hook infrun into thread/inferior lifetime events.  */
  observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
  observer_attach_thread_stop_requested (infrun_thread_stop_requested);
  observer_attach_thread_exit (infrun_thread_thread_exit);
  observer_attach_inferior_exit (infrun_inferior_exit);

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);

  add_setshow_boolean_cmd ("observer", no_class,
			   &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
			   set_observer_mode,
			   show_observer_mode,
			   &setlist,
			   &showlist);
}
This page took 0.194131 seconds and 5 git commands to generate.