1 /* Select target systems and architectures at runtime for GDB.
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
26 #include "target-dcache.h"
36 #include "gdb_assert.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
49 static void target_info (char *, int);
51 static void default_terminal_info (struct target_ops
*, const char *, int);
53 static int default_watchpoint_addr_within_range (struct target_ops
*,
54 CORE_ADDR
, CORE_ADDR
, int);
56 static int default_region_ok_for_hw_watchpoint (struct target_ops
*,
59 static void default_rcmd (struct target_ops
*, char *, struct ui_file
*);
61 static ptid_t
default_get_ada_task_ptid (struct target_ops
*self
,
64 static int default_follow_fork (struct target_ops
*self
, int follow_child
,
67 static void default_mourn_inferior (struct target_ops
*self
);
69 static void tcomplain (void) ATTRIBUTE_NORETURN
;
71 static int nomemory (CORE_ADDR
, char *, int, int, struct target_ops
*);
73 static int return_zero (void);
75 void target_ignore (void);
77 static void target_command (char *, int);
79 static struct target_ops
*find_default_run_target (char *);
81 static target_xfer_partial_ftype default_xfer_partial
;
83 static struct gdbarch
*default_thread_architecture (struct target_ops
*ops
,
86 static int dummy_find_memory_regions (struct target_ops
*self
,
87 find_memory_region_ftype ignore1
,
90 static char *dummy_make_corefile_notes (struct target_ops
*self
,
91 bfd
*ignore1
, int *ignore2
);
93 static char *default_pid_to_str (struct target_ops
*ops
, ptid_t ptid
);
95 static int find_default_can_async_p (struct target_ops
*ignore
);
97 static int find_default_is_async_p (struct target_ops
*ignore
);
99 static enum exec_direction_kind default_execution_direction
100 (struct target_ops
*self
);
102 #include "target-delegates.c"
104 static void init_dummy_target (void);
106 static struct target_ops debug_target
;
108 static void debug_to_open (char *, int);
110 static void debug_to_prepare_to_store (struct target_ops
*self
,
113 static void debug_to_files_info (struct target_ops
*);
115 static int debug_to_insert_breakpoint (struct target_ops
*, struct gdbarch
*,
116 struct bp_target_info
*);
118 static int debug_to_remove_breakpoint (struct target_ops
*, struct gdbarch
*,
119 struct bp_target_info
*);
121 static int debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
124 static int debug_to_insert_hw_breakpoint (struct target_ops
*self
,
126 struct bp_target_info
*);
128 static int debug_to_remove_hw_breakpoint (struct target_ops
*self
,
130 struct bp_target_info
*);
132 static int debug_to_insert_watchpoint (struct target_ops
*self
,
134 struct expression
*);
136 static int debug_to_remove_watchpoint (struct target_ops
*self
,
138 struct expression
*);
140 static int debug_to_stopped_data_address (struct target_ops
*, CORE_ADDR
*);
142 static int debug_to_watchpoint_addr_within_range (struct target_ops
*,
143 CORE_ADDR
, CORE_ADDR
, int);
145 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
148 static int debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
150 struct expression
*);
152 static void debug_to_terminal_init (struct target_ops
*self
);
154 static void debug_to_terminal_inferior (struct target_ops
*self
);
156 static void debug_to_terminal_ours_for_output (struct target_ops
*self
);
158 static void debug_to_terminal_save_ours (struct target_ops
*self
);
160 static void debug_to_terminal_ours (struct target_ops
*self
);
162 static void debug_to_load (struct target_ops
*self
, char *, int);
164 static int debug_to_can_run (struct target_ops
*self
);
166 static void debug_to_stop (struct target_ops
*self
, ptid_t
);
168 /* Pointer to array of target architecture structures; the size of the
169 array; the current index into the array; the allocated size of the
171 struct target_ops
**target_structs
;
172 unsigned target_struct_size
;
173 unsigned target_struct_allocsize
;
174 #define DEFAULT_ALLOCSIZE 10
176 /* The initial current target, so that there is always a semi-valid
179 static struct target_ops dummy_target
;
181 /* Top of target stack. */
183 static struct target_ops
*target_stack
;
185 /* The target structure we are currently using to talk to a process
186 or file or whatever "inferior" we have. */
188 struct target_ops current_target
;
190 /* Command list for target. */
192 static struct cmd_list_element
*targetlist
= NULL
;
194 /* Nonzero if we should trust readonly sections from the
195 executable when reading memory. */
197 static int trust_readonly
= 0;
199 /* Nonzero if we should show true memory content including
200 memory breakpoint inserted by gdb. */
202 static int show_memory_breakpoints
= 0;
204 /* These globals control whether GDB attempts to perform these
205 operations; they are useful for targets that need to prevent
206 inadvertant disruption, such as in non-stop mode. */
208 int may_write_registers
= 1;
210 int may_write_memory
= 1;
212 int may_insert_breakpoints
= 1;
214 int may_insert_tracepoints
= 1;
216 int may_insert_fast_tracepoints
= 1;
220 /* Non-zero if we want to see trace of target level stuff. */
222 static unsigned int targetdebug
= 0;
/* "show debug target" callback: report the current target debug level.  */

static void
show_targetdebug (struct ui_file *file, int from_tty,
		  struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Target debugging is %s.\n"), value);
}
230 static void setup_target_debug (void);
232 /* The user just typed 'target' without the name of a target. */
235 target_command (char *arg
, int from_tty
)
237 fputs_filtered ("Argument required (target name). Try `help target'\n",
241 /* Default target_has_* methods for process_stratum targets. */
244 default_child_has_all_memory (struct target_ops
*ops
)
246 /* If no inferior selected, then we can't read memory here. */
247 if (ptid_equal (inferior_ptid
, null_ptid
))
254 default_child_has_memory (struct target_ops
*ops
)
256 /* If no inferior selected, then we can't read memory here. */
257 if (ptid_equal (inferior_ptid
, null_ptid
))
264 default_child_has_stack (struct target_ops
*ops
)
266 /* If no inferior selected, there's no stack. */
267 if (ptid_equal (inferior_ptid
, null_ptid
))
274 default_child_has_registers (struct target_ops
*ops
)
276 /* Can't read registers from no inferior. */
277 if (ptid_equal (inferior_ptid
, null_ptid
))
284 default_child_has_execution (struct target_ops
*ops
, ptid_t the_ptid
)
286 /* If there's no thread selected, then we can't make it run through
288 if (ptid_equal (the_ptid
, null_ptid
))
296 target_has_all_memory_1 (void)
298 struct target_ops
*t
;
300 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
301 if (t
->to_has_all_memory (t
))
308 target_has_memory_1 (void)
310 struct target_ops
*t
;
312 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
313 if (t
->to_has_memory (t
))
320 target_has_stack_1 (void)
322 struct target_ops
*t
;
324 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
325 if (t
->to_has_stack (t
))
332 target_has_registers_1 (void)
334 struct target_ops
*t
;
336 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
337 if (t
->to_has_registers (t
))
344 target_has_execution_1 (ptid_t the_ptid
)
346 struct target_ops
*t
;
348 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
349 if (t
->to_has_execution (t
, the_ptid
))
356 target_has_execution_current (void)
358 return target_has_execution_1 (inferior_ptid
);
361 /* Complete initialization of T. This ensures that various fields in
362 T are set, if needed by the target implementation. */
365 complete_target_initialization (struct target_ops
*t
)
367 /* Provide default values for all "must have" methods. */
368 if (t
->to_xfer_partial
== NULL
)
369 t
->to_xfer_partial
= default_xfer_partial
;
371 if (t
->to_has_all_memory
== NULL
)
372 t
->to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
374 if (t
->to_has_memory
== NULL
)
375 t
->to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
377 if (t
->to_has_stack
== NULL
)
378 t
->to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
380 if (t
->to_has_registers
== NULL
)
381 t
->to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
383 if (t
->to_has_execution
== NULL
)
384 t
->to_has_execution
= (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
386 install_delegators (t
);
389 /* Add possible target architecture T to the list and add a new
390 command 'target T->to_shortname'. Set COMPLETER as the command's
391 completer if not NULL. */
394 add_target_with_completer (struct target_ops
*t
,
395 completer_ftype
*completer
)
397 struct cmd_list_element
*c
;
399 complete_target_initialization (t
);
403 target_struct_allocsize
= DEFAULT_ALLOCSIZE
;
404 target_structs
= (struct target_ops
**) xmalloc
405 (target_struct_allocsize
* sizeof (*target_structs
));
407 if (target_struct_size
>= target_struct_allocsize
)
409 target_struct_allocsize
*= 2;
410 target_structs
= (struct target_ops
**)
411 xrealloc ((char *) target_structs
,
412 target_struct_allocsize
* sizeof (*target_structs
));
414 target_structs
[target_struct_size
++] = t
;
416 if (targetlist
== NULL
)
417 add_prefix_cmd ("target", class_run
, target_command
, _("\
418 Connect to a target machine or process.\n\
419 The first argument is the type or protocol of the target machine.\n\
420 Remaining arguments are interpreted by the target protocol. For more\n\
421 information on the arguments for a particular protocol, type\n\
422 `help target ' followed by the protocol name."),
423 &targetlist
, "target ", 0, &cmdlist
);
424 c
= add_cmd (t
->to_shortname
, no_class
, t
->to_open
, t
->to_doc
,
426 if (completer
!= NULL
)
427 set_cmd_completer (c
, completer
);
430 /* Add a possible target architecture to the list. */
433 add_target (struct target_ops
*t
)
435 add_target_with_completer (t
, NULL
);
441 add_deprecated_target_alias (struct target_ops
*t
, char *alias
)
443 struct cmd_list_element
*c
;
446 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
448 c
= add_cmd (alias
, no_class
, t
->to_open
, t
->to_doc
, &targetlist
);
449 alt
= xstrprintf ("target %s", t
->to_shortname
);
450 deprecate_cmd (c
, alt
);
464 fprintf_unfiltered (gdb_stdlog
, "target_kill ()\n");
466 current_target
.to_kill (¤t_target
);
470 target_load (char *arg
, int from_tty
)
472 target_dcache_invalidate ();
473 (*current_target
.to_load
) (¤t_target
, arg
, from_tty
);
477 target_create_inferior (char *exec_file
, char *args
,
478 char **env
, int from_tty
)
480 struct target_ops
*t
;
482 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
484 if (t
->to_create_inferior
!= NULL
)
486 t
->to_create_inferior (t
, exec_file
, args
, env
, from_tty
);
488 fprintf_unfiltered (gdb_stdlog
,
489 "target_create_inferior (%s, %s, xxx, %d)\n",
490 exec_file
, args
, from_tty
);
495 internal_error (__FILE__
, __LINE__
,
496 _("could not find a target to create inferior"));
500 target_terminal_inferior (void)
502 /* A background resume (``run&'') should leave GDB in control of the
503 terminal. Use target_can_async_p, not target_is_async_p, since at
504 this point the target is not async yet. However, if sync_execution
505 is not set, we know it will become async prior to resume. */
506 if (target_can_async_p () && !sync_execution
)
509 /* If GDB is resuming the inferior in the foreground, install
510 inferior's terminal modes. */
511 (*current_target
.to_terminal_inferior
) (¤t_target
);
515 nomemory (CORE_ADDR memaddr
, char *myaddr
, int len
, int write
,
516 struct target_ops
*t
)
518 errno
= EIO
; /* Can't read/write this location. */
519 return 0; /* No bytes handled. */
525 error (_("You can't do that when your target is `%s'"),
526 current_target
.to_shortname
);
/* Complain that there is no process to operate on.  Does not return.  */

void
noprocess (void)
{
  error (_("You can't do that without a process to debug."));
}
/* Default to_terminal_info method: nothing saved to report.  */

static void
default_terminal_info (struct target_ops *self, const char *args, int from_tty)
{
  printf_unfiltered (_("No saved terminal information.\n"));
}
541 /* A default implementation for the to_get_ada_task_ptid target method.
543 This function builds the PTID by using both LWP and TID as part of
544 the PTID lwp and tid elements. The pid used is the pid of the
548 default_get_ada_task_ptid (struct target_ops
*self
, long lwp
, long tid
)
550 return ptid_build (ptid_get_pid (inferior_ptid
), lwp
, tid
);
553 static enum exec_direction_kind
554 default_execution_direction (struct target_ops
*self
)
556 if (!target_can_execute_reverse
)
558 else if (!target_can_async_p ())
561 gdb_assert_not_reached ("\
562 to_execution_direction must be implemented for reverse async");
565 /* Go through the target stack from top to bottom, copying over zero
566 entries in current_target, then filling in still empty entries. In
567 effect, we are doing class inheritance through the pushed target
570 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
571 is currently implemented, is that it discards any knowledge of
572 which target an inherited method originally belonged to.
573 Consequently, new new target methods should instead explicitly and
574 locally search the target stack for the target that can handle the
578 update_current_target (void)
580 struct target_ops
*t
;
582 /* First, reset current's contents. */
583 memset (¤t_target
, 0, sizeof (current_target
));
585 /* Install the delegators. */
586 install_delegators (¤t_target
);
588 #define INHERIT(FIELD, TARGET) \
589 if (!current_target.FIELD) \
590 current_target.FIELD = (TARGET)->FIELD
592 for (t
= target_stack
; t
; t
= t
->beneath
)
594 INHERIT (to_shortname
, t
);
595 INHERIT (to_longname
, t
);
597 /* Do not inherit to_open. */
598 /* Do not inherit to_close. */
599 /* Do not inherit to_attach. */
600 /* Do not inherit to_post_attach. */
601 INHERIT (to_attach_no_wait
, t
);
602 /* Do not inherit to_detach. */
603 /* Do not inherit to_disconnect. */
604 /* Do not inherit to_resume. */
605 /* Do not inherit to_wait. */
606 /* Do not inherit to_fetch_registers. */
607 /* Do not inherit to_store_registers. */
608 /* Do not inherit to_prepare_to_store. */
609 INHERIT (deprecated_xfer_memory
, t
);
610 /* Do not inherit to_files_info. */
611 /* Do not inherit to_insert_breakpoint. */
612 /* Do not inherit to_remove_breakpoint. */
613 /* Do not inherit to_can_use_hw_breakpoint. */
614 /* Do not inherit to_insert_hw_breakpoint. */
615 /* Do not inherit to_remove_hw_breakpoint. */
616 /* Do not inherit to_ranged_break_num_registers. */
617 /* Do not inherit to_insert_watchpoint. */
618 /* Do not inherit to_remove_watchpoint. */
619 /* Do not inherit to_insert_mask_watchpoint. */
620 /* Do not inherit to_remove_mask_watchpoint. */
621 /* Do not inherit to_stopped_data_address. */
622 INHERIT (to_have_steppable_watchpoint
, t
);
623 INHERIT (to_have_continuable_watchpoint
, t
);
624 /* Do not inherit to_stopped_by_watchpoint. */
625 /* Do not inherit to_watchpoint_addr_within_range. */
626 /* Do not inherit to_region_ok_for_hw_watchpoint. */
627 /* Do not inherit to_can_accel_watchpoint_condition. */
628 /* Do not inherit to_masked_watch_num_registers. */
629 /* Do not inherit to_terminal_init. */
630 /* Do not inherit to_terminal_inferior. */
631 /* Do not inherit to_terminal_ours_for_output. */
632 /* Do not inherit to_terminal_ours. */
633 /* Do not inherit to_terminal_save_ours. */
634 /* Do not inherit to_terminal_info. */
635 /* Do not inherit to_kill. */
636 /* Do not inherit to_load. */
637 /* Do no inherit to_create_inferior. */
638 /* Do not inherit to_post_startup_inferior. */
639 /* Do not inherit to_insert_fork_catchpoint. */
640 /* Do not inherit to_remove_fork_catchpoint. */
641 /* Do not inherit to_insert_vfork_catchpoint. */
642 /* Do not inherit to_remove_vfork_catchpoint. */
643 /* Do not inherit to_follow_fork. */
644 /* Do not inherit to_insert_exec_catchpoint. */
645 /* Do not inherit to_remove_exec_catchpoint. */
646 /* Do not inherit to_set_syscall_catchpoint. */
647 /* Do not inherit to_has_exited. */
648 /* Do not inherit to_mourn_inferior. */
649 INHERIT (to_can_run
, t
);
650 /* Do not inherit to_pass_signals. */
651 /* Do not inherit to_program_signals. */
652 /* Do not inherit to_thread_alive. */
653 /* Do not inherit to_find_new_threads. */
654 /* Do not inherit to_pid_to_str. */
655 /* Do not inherit to_extra_thread_info. */
656 /* Do not inherit to_thread_name. */
657 /* Do not inherit to_stop. */
658 /* Do not inherit to_xfer_partial. */
659 /* Do not inherit to_rcmd. */
660 /* Do not inherit to_pid_to_exec_file. */
661 /* Do not inherit to_log_command. */
662 INHERIT (to_stratum
, t
);
663 /* Do not inherit to_has_all_memory. */
664 /* Do not inherit to_has_memory. */
665 /* Do not inherit to_has_stack. */
666 /* Do not inherit to_has_registers. */
667 /* Do not inherit to_has_execution. */
668 INHERIT (to_has_thread_control
, t
);
669 /* Do not inherit to_can_async_p. */
670 /* Do not inherit to_is_async_p. */
671 /* Do not inherit to_async. */
672 /* Do not inherit to_find_memory_regions. */
673 /* Do not inherit to_make_corefile_notes. */
674 /* Do not inherit to_get_bookmark. */
675 /* Do not inherit to_goto_bookmark. */
676 /* Do not inherit to_get_thread_local_address. */
677 /* Do not inherit to_can_execute_reverse. */
678 /* Do not inherit to_execution_direction. */
679 /* Do not inherit to_thread_architecture. */
680 /* Do not inherit to_read_description. */
681 /* Do not inherit to_get_ada_task_ptid. */
682 /* Do not inherit to_search_memory. */
683 /* Do not inherit to_supports_multi_process. */
684 /* Do not inherit to_supports_enable_disable_tracepoint. */
685 /* Do not inherit to_supports_string_tracing. */
686 /* Do not inherit to_trace_init. */
687 /* Do not inherit to_download_tracepoint. */
688 /* Do not inherit to_can_download_tracepoint. */
689 /* Do not inherit to_download_trace_state_variable. */
690 /* Do not inherit to_enable_tracepoint. */
691 /* Do not inherit to_disable_tracepoint. */
692 /* Do not inherit to_trace_set_readonly_regions. */
693 /* Do not inherit to_trace_start. */
694 /* Do not inherit to_get_trace_status. */
695 /* Do not inherit to_get_tracepoint_status. */
696 /* Do not inherit to_trace_stop. */
697 /* Do not inherit to_trace_find. */
698 /* Do not inherit to_get_trace_state_variable_value. */
699 /* Do not inherit to_save_trace_data. */
700 /* Do not inherit to_upload_tracepoints. */
701 /* Do not inherit to_upload_trace_state_variables. */
702 /* Do not inherit to_get_raw_trace_data. */
703 /* Do not inherit to_get_min_fast_tracepoint_insn_len. */
704 /* Do not inherit to_set_disconnected_tracing. */
705 /* Do not inherit to_set_circular_trace_buffer. */
706 /* Do not inherit to_set_trace_buffer_size. */
707 /* Do not inherit to_set_trace_notes. */
708 /* Do not inherit to_get_tib_address. */
709 /* Do not inherit to_set_permissions. */
710 /* Do not inherit to_static_tracepoint_marker_at. */
711 /* Do not inherit to_static_tracepoint_markers_by_strid. */
712 /* Do not inherit to_traceframe_info. */
713 /* Do not inherit to_use_agent. */
714 /* Do not inherit to_can_use_agent. */
715 /* Do not inherit to_augmented_libraries_svr4_read. */
716 INHERIT (to_magic
, t
);
718 to_supports_evaluation_of_breakpoint_conditions. */
719 /* Do not inherit to_can_run_breakpoint_commands. */
720 /* Do not inherit to_memory_map. */
721 /* Do not inherit to_flash_erase. */
722 /* Do not inherit to_flash_done. */
726 /* Clean up a target struct so it no longer has any zero pointers in
727 it. Some entries are defaulted to a method that print an error,
728 others are hard-wired to a standard recursive default. */
730 #define de_fault(field, value) \
731 if (!current_target.field) \
732 current_target.field = value
735 (void (*) (char *, int))
738 (void (*) (struct target_ops
*))
740 de_fault (deprecated_xfer_memory
,
741 (int (*) (CORE_ADDR
, gdb_byte
*, int, int,
742 struct mem_attrib
*, struct target_ops
*))
744 de_fault (to_can_run
,
745 (int (*) (struct target_ops
*))
747 current_target
.to_read_description
= NULL
;
751 /* Finally, position the target-stack beneath the squashed
752 "current_target". That way code looking for a non-inherited
753 target method can quickly and simply find it. */
754 current_target
.beneath
= target_stack
;
757 setup_target_debug ();
760 /* Push a new target type into the stack of the existing target accessors,
761 possibly superseding some of the existing accessors.
763 Rather than allow an empty stack, we always have the dummy target at
764 the bottom stratum, so we can call the function vectors without
768 push_target (struct target_ops
*t
)
770 struct target_ops
**cur
;
772 /* Check magic number. If wrong, it probably means someone changed
773 the struct definition, but not all the places that initialize one. */
774 if (t
->to_magic
!= OPS_MAGIC
)
776 fprintf_unfiltered (gdb_stderr
,
777 "Magic number of %s target struct wrong\n",
779 internal_error (__FILE__
, __LINE__
,
780 _("failed internal consistency check"));
783 /* Find the proper stratum to install this target in. */
784 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
786 if ((int) (t
->to_stratum
) >= (int) (*cur
)->to_stratum
)
790 /* If there's already targets at this stratum, remove them. */
791 /* FIXME: cagney/2003-10-15: I think this should be popping all
792 targets to CUR, and not just those at this stratum level. */
793 while ((*cur
) != NULL
&& t
->to_stratum
== (*cur
)->to_stratum
)
795 /* There's already something at this stratum level. Close it,
796 and un-hook it from the stack. */
797 struct target_ops
*tmp
= (*cur
);
799 (*cur
) = (*cur
)->beneath
;
804 /* We have removed all targets in our stratum, now add the new one. */
808 update_current_target ();
811 /* Remove a target_ops vector from the stack, wherever it may be.
812 Return how many times it was removed (0 or 1). */
815 unpush_target (struct target_ops
*t
)
817 struct target_ops
**cur
;
818 struct target_ops
*tmp
;
820 if (t
->to_stratum
== dummy_stratum
)
821 internal_error (__FILE__
, __LINE__
,
822 _("Attempt to unpush the dummy target"));
824 /* Look for the specified target. Note that we assume that a target
825 can only occur once in the target stack. */
827 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
833 /* If we don't find target_ops, quit. Only open targets should be
838 /* Unchain the target. */
840 (*cur
) = (*cur
)->beneath
;
843 update_current_target ();
845 /* Finally close the target. Note we do this after unchaining, so
846 any target method calls from within the target_close
847 implementation don't end up in T anymore. */
854 pop_all_targets_above (enum strata above_stratum
)
856 while ((int) (current_target
.to_stratum
) > (int) above_stratum
)
858 if (!unpush_target (target_stack
))
860 fprintf_unfiltered (gdb_stderr
,
861 "pop_all_targets couldn't find target %s\n",
862 target_stack
->to_shortname
);
863 internal_error (__FILE__
, __LINE__
,
864 _("failed internal consistency check"));
871 pop_all_targets (void)
873 pop_all_targets_above (dummy_stratum
);
876 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
879 target_is_pushed (struct target_ops
*t
)
881 struct target_ops
**cur
;
883 /* Check magic number. If wrong, it probably means someone changed
884 the struct definition, but not all the places that initialize one. */
885 if (t
->to_magic
!= OPS_MAGIC
)
887 fprintf_unfiltered (gdb_stderr
,
888 "Magic number of %s target struct wrong\n",
890 internal_error (__FILE__
, __LINE__
,
891 _("failed internal consistency check"));
894 for (cur
= &target_stack
; (*cur
) != NULL
; cur
= &(*cur
)->beneath
)
901 /* Using the objfile specified in OBJFILE, find the address for the
902 current thread's thread-local storage with offset OFFSET. */
904 target_translate_tls_address (struct objfile
*objfile
, CORE_ADDR offset
)
906 volatile CORE_ADDR addr
= 0;
907 struct target_ops
*target
;
909 for (target
= current_target
.beneath
;
911 target
= target
->beneath
)
913 if (target
->to_get_thread_local_address
!= NULL
)
918 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
920 ptid_t ptid
= inferior_ptid
;
921 volatile struct gdb_exception ex
;
923 TRY_CATCH (ex
, RETURN_MASK_ALL
)
927 /* Fetch the load module address for this objfile. */
928 lm_addr
= gdbarch_fetch_tls_load_module_address (target_gdbarch (),
930 /* If it's 0, throw the appropriate exception. */
932 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR
,
933 _("TLS load module not found"));
935 addr
= target
->to_get_thread_local_address (target
, ptid
,
938 /* If an error occurred, print TLS related messages here. Otherwise,
939 throw the error to some higher catcher. */
942 int objfile_is_library
= (objfile
->flags
& OBJF_SHARED
);
946 case TLS_NO_LIBRARY_SUPPORT_ERROR
:
947 error (_("Cannot find thread-local variables "
948 "in this thread library."));
950 case TLS_LOAD_MODULE_NOT_FOUND_ERROR
:
951 if (objfile_is_library
)
952 error (_("Cannot find shared library `%s' in dynamic"
953 " linker's load module list"), objfile_name (objfile
));
955 error (_("Cannot find executable file `%s' in dynamic"
956 " linker's load module list"), objfile_name (objfile
));
958 case TLS_NOT_ALLOCATED_YET_ERROR
:
959 if (objfile_is_library
)
960 error (_("The inferior has not yet allocated storage for"
961 " thread-local variables in\n"
962 "the shared library `%s'\n"
964 objfile_name (objfile
), target_pid_to_str (ptid
));
966 error (_("The inferior has not yet allocated storage for"
967 " thread-local variables in\n"
968 "the executable `%s'\n"
970 objfile_name (objfile
), target_pid_to_str (ptid
));
972 case TLS_GENERIC_ERROR
:
973 if (objfile_is_library
)
974 error (_("Cannot find thread-local storage for %s, "
975 "shared library %s:\n%s"),
976 target_pid_to_str (ptid
),
977 objfile_name (objfile
), ex
.message
);
979 error (_("Cannot find thread-local storage for %s, "
980 "executable file %s:\n%s"),
981 target_pid_to_str (ptid
),
982 objfile_name (objfile
), ex
.message
);
985 throw_exception (ex
);
990 /* It wouldn't be wrong here to try a gdbarch method, too; finding
991 TLS is an ABI-specific thing. But we don't do that yet. */
993 error (_("Cannot find thread-local variables on this target"));
999 target_xfer_status_to_string (enum target_xfer_status err
)
1001 #define CASE(X) case X: return #X
1004 CASE(TARGET_XFER_E_IO
);
1005 CASE(TARGET_XFER_E_UNAVAILABLE
);
1014 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1016 /* target_read_string -- read a null terminated string, up to LEN bytes,
1017 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1018 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1019 is responsible for freeing it. Return the number of bytes successfully
1023 target_read_string (CORE_ADDR memaddr
, char **string
, int len
, int *errnop
)
1025 int tlen
, offset
, i
;
1029 int buffer_allocated
;
1031 unsigned int nbytes_read
= 0;
1033 gdb_assert (string
);
1035 /* Small for testing. */
1036 buffer_allocated
= 4;
1037 buffer
= xmalloc (buffer_allocated
);
1042 tlen
= MIN (len
, 4 - (memaddr
& 3));
1043 offset
= memaddr
& 3;
1045 errcode
= target_read_memory (memaddr
& ~3, buf
, sizeof buf
);
1048 /* The transfer request might have crossed the boundary to an
1049 unallocated region of memory. Retry the transfer, requesting
1053 errcode
= target_read_memory (memaddr
, buf
, 1);
1058 if (bufptr
- buffer
+ tlen
> buffer_allocated
)
1062 bytes
= bufptr
- buffer
;
1063 buffer_allocated
*= 2;
1064 buffer
= xrealloc (buffer
, buffer_allocated
);
1065 bufptr
= buffer
+ bytes
;
1068 for (i
= 0; i
< tlen
; i
++)
1070 *bufptr
++ = buf
[i
+ offset
];
1071 if (buf
[i
+ offset
] == '\000')
1073 nbytes_read
+= i
+ 1;
1080 nbytes_read
+= tlen
;
1089 struct target_section_table
*
1090 target_get_section_table (struct target_ops
*target
)
1093 fprintf_unfiltered (gdb_stdlog
, "target_get_section_table ()\n");
1095 return (*target
->to_get_section_table
) (target
);
1098 /* Find a section containing ADDR. */
1100 struct target_section
*
1101 target_section_by_addr (struct target_ops
*target
, CORE_ADDR addr
)
1103 struct target_section_table
*table
= target_get_section_table (target
);
1104 struct target_section
*secp
;
1109 for (secp
= table
->sections
; secp
< table
->sections_end
; secp
++)
1111 if (addr
>= secp
->addr
&& addr
< secp
->endaddr
)
1117 /* Read memory from the live target, even if currently inspecting a
1118 traceframe. The return is the same as that of target_read. */
1120 static enum target_xfer_status
1121 target_read_live_memory (enum target_object object
,
1122 ULONGEST memaddr
, gdb_byte
*myaddr
, ULONGEST len
,
1123 ULONGEST
*xfered_len
)
1125 enum target_xfer_status ret
;
1126 struct cleanup
*cleanup
;
1128 /* Switch momentarily out of tfind mode so to access live memory.
1129 Note that this must not clear global state, such as the frame
1130 cache, which must still remain valid for the previous traceframe.
1131 We may be _building_ the frame cache at this point. */
1132 cleanup
= make_cleanup_restore_traceframe_number ();
1133 set_traceframe_number (-1);
1135 ret
= target_xfer_partial (current_target
.beneath
, object
, NULL
,
1136 myaddr
, NULL
, memaddr
, len
, xfered_len
);
1138 do_cleanups (cleanup
);
1142 /* Using the set of read-only target sections of OPS, read live
1143 read-only memory. Note that the actual reads start from the
1144 top-most target again.
1146 For interface/parameters/return description see target.h,
1149 static enum target_xfer_status
1150 memory_xfer_live_readonly_partial (struct target_ops
*ops
,
1151 enum target_object object
,
1152 gdb_byte
*readbuf
, ULONGEST memaddr
,
1153 ULONGEST len
, ULONGEST
*xfered_len
)
1155 struct target_section
*secp
;
1156 struct target_section_table
*table
;
1158 secp
= target_section_by_addr (ops
, memaddr
);
1160 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1161 secp
->the_bfd_section
)
1164 struct target_section
*p
;
1165 ULONGEST memend
= memaddr
+ len
;
1167 table
= target_get_section_table (ops
);
1169 for (p
= table
->sections
; p
< table
->sections_end
; p
++)
1171 if (memaddr
>= p
->addr
)
1173 if (memend
<= p
->endaddr
)
1175 /* Entire transfer is within this section. */
1176 return target_read_live_memory (object
, memaddr
,
1177 readbuf
, len
, xfered_len
);
1179 else if (memaddr
>= p
->endaddr
)
1181 /* This section ends before the transfer starts. */
1186 /* This section overlaps the transfer. Just do half. */
1187 len
= p
->endaddr
- memaddr
;
1188 return target_read_live_memory (object
, memaddr
,
1189 readbuf
, len
, xfered_len
);
1195 return TARGET_XFER_EOF
;
1198 /* Read memory from more than one valid target. A core file, for
1199 instance, could have some of memory but delegate other bits to
1200 the target below it. So, we must manually try all targets. */
1202 static enum target_xfer_status
1203 raw_memory_xfer_partial (struct target_ops
*ops
, gdb_byte
*readbuf
,
1204 const gdb_byte
*writebuf
, ULONGEST memaddr
, LONGEST len
,
1205 ULONGEST
*xfered_len
)
1207 enum target_xfer_status res
;
1211 res
= ops
->to_xfer_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1212 readbuf
, writebuf
, memaddr
, len
,
1214 if (res
== TARGET_XFER_OK
)
1217 /* Stop if the target reports that the memory is not available. */
1218 if (res
== TARGET_XFER_E_UNAVAILABLE
)
1221 /* We want to continue past core files to executables, but not
1222 past a running target's memory. */
1223 if (ops
->to_has_all_memory (ops
))
1228 while (ops
!= NULL
);
1233 /* Perform a partial memory transfer.
1234 For docs see target.h, to_xfer_partial. */
1236 static enum target_xfer_status
1237 memory_xfer_partial_1 (struct target_ops
*ops
, enum target_object object
,
1238 gdb_byte
*readbuf
, const gdb_byte
*writebuf
, ULONGEST memaddr
,
1239 ULONGEST len
, ULONGEST
*xfered_len
)
1241 enum target_xfer_status res
;
1243 struct mem_region
*region
;
1244 struct inferior
*inf
;
1246 /* For accesses to unmapped overlay sections, read directly from
1247 files. Must do this first, as MEMADDR may need adjustment. */
1248 if (readbuf
!= NULL
&& overlay_debugging
)
1250 struct obj_section
*section
= find_pc_overlay (memaddr
);
1252 if (pc_in_unmapped_range (memaddr
, section
))
1254 struct target_section_table
*table
1255 = target_get_section_table (ops
);
1256 const char *section_name
= section
->the_bfd_section
->name
;
1258 memaddr
= overlay_mapped_address (memaddr
, section
);
1259 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1260 memaddr
, len
, xfered_len
,
1262 table
->sections_end
,
1267 /* Try the executable files, if "trust-readonly-sections" is set. */
1268 if (readbuf
!= NULL
&& trust_readonly
)
1270 struct target_section
*secp
;
1271 struct target_section_table
*table
;
1273 secp
= target_section_by_addr (ops
, memaddr
);
1275 && (bfd_get_section_flags (secp
->the_bfd_section
->owner
,
1276 secp
->the_bfd_section
)
1279 table
= target_get_section_table (ops
);
1280 return section_table_xfer_memory_partial (readbuf
, writebuf
,
1281 memaddr
, len
, xfered_len
,
1283 table
->sections_end
,
1288 /* If reading unavailable memory in the context of traceframes, and
1289 this address falls within a read-only section, fallback to
1290 reading from live memory. */
1291 if (readbuf
!= NULL
&& get_traceframe_number () != -1)
1293 VEC(mem_range_s
) *available
;
1295 /* If we fail to get the set of available memory, then the
1296 target does not support querying traceframe info, and so we
1297 attempt reading from the traceframe anyway (assuming the
1298 target implements the old QTro packet then). */
1299 if (traceframe_available_memory (&available
, memaddr
, len
))
1301 struct cleanup
*old_chain
;
1303 old_chain
= make_cleanup (VEC_cleanup(mem_range_s
), &available
);
1305 if (VEC_empty (mem_range_s
, available
)
1306 || VEC_index (mem_range_s
, available
, 0)->start
!= memaddr
)
1308 /* Don't read into the traceframe's available
1310 if (!VEC_empty (mem_range_s
, available
))
1312 LONGEST oldlen
= len
;
1314 len
= VEC_index (mem_range_s
, available
, 0)->start
- memaddr
;
1315 gdb_assert (len
<= oldlen
);
1318 do_cleanups (old_chain
);
1320 /* This goes through the topmost target again. */
1321 res
= memory_xfer_live_readonly_partial (ops
, object
,
1324 if (res
== TARGET_XFER_OK
)
1325 return TARGET_XFER_OK
;
1328 /* No use trying further, we know some memory starting
1329 at MEMADDR isn't available. */
1331 return TARGET_XFER_E_UNAVAILABLE
;
1335 /* Don't try to read more than how much is available, in
1336 case the target implements the deprecated QTro packet to
1337 cater for older GDBs (the target's knowledge of read-only
1338 sections may be outdated by now). */
1339 len
= VEC_index (mem_range_s
, available
, 0)->length
;
1341 do_cleanups (old_chain
);
1345 /* Try GDB's internal data cache. */
1346 region
= lookup_mem_region (memaddr
);
1347 /* region->hi == 0 means there's no upper bound. */
1348 if (memaddr
+ len
< region
->hi
|| region
->hi
== 0)
1351 reg_len
= region
->hi
- memaddr
;
1353 switch (region
->attrib
.mode
)
1356 if (writebuf
!= NULL
)
1357 return TARGET_XFER_E_IO
;
1361 if (readbuf
!= NULL
)
1362 return TARGET_XFER_E_IO
;
1366 /* We only support writing to flash during "load" for now. */
1367 if (writebuf
!= NULL
)
1368 error (_("Writing to flash memory forbidden in this context"));
1372 return TARGET_XFER_E_IO
;
1375 if (!ptid_equal (inferior_ptid
, null_ptid
))
1376 inf
= find_inferior_pid (ptid_get_pid (inferior_ptid
));
1381 /* The dcache reads whole cache lines; that doesn't play well
1382 with reading from a trace buffer, because reading outside of
1383 the collected memory range fails. */
1384 && get_traceframe_number () == -1
1385 && (region
->attrib
.cache
1386 || (stack_cache_enabled_p () && object
== TARGET_OBJECT_STACK_MEMORY
)
1387 || (code_cache_enabled_p () && object
== TARGET_OBJECT_CODE_MEMORY
)))
1389 DCACHE
*dcache
= target_dcache_get_or_init ();
1392 if (readbuf
!= NULL
)
1393 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, readbuf
, reg_len
, 0);
1395 /* FIXME drow/2006-08-09: If we're going to preserve const
1396 correctness dcache_xfer_memory should take readbuf and
1398 l
= dcache_xfer_memory (ops
, dcache
, memaddr
, (void *) writebuf
,
1401 return TARGET_XFER_E_IO
;
1404 *xfered_len
= (ULONGEST
) l
;
1405 return TARGET_XFER_OK
;
1409 /* If none of those methods found the memory we wanted, fall back
1410 to a target partial transfer. Normally a single call to
1411 to_xfer_partial is enough; if it doesn't recognize an object
1412 it will call the to_xfer_partial of the next target down.
1413 But for memory this won't do. Memory is the only target
1414 object which can be read from more than one valid target.
1415 A core file, for instance, could have some of memory but
1416 delegate other bits to the target below it. So, we must
1417 manually try all targets. */
1419 res
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, memaddr
, reg_len
,
1422 /* Make sure the cache gets updated no matter what - if we are writing
1423 to the stack. Even if this write is not tagged as such, we still need
1424 to update the cache. */
1426 if (res
== TARGET_XFER_OK
1429 && target_dcache_init_p ()
1430 && !region
->attrib
.cache
1431 && ((stack_cache_enabled_p () && object
!= TARGET_OBJECT_STACK_MEMORY
)
1432 || (code_cache_enabled_p () && object
!= TARGET_OBJECT_CODE_MEMORY
)))
1434 DCACHE
*dcache
= target_dcache_get ();
1436 dcache_update (dcache
, memaddr
, (void *) writebuf
, reg_len
);
1439 /* If we still haven't got anything, return the last error. We
1444 /* Perform a partial memory transfer. For docs see target.h,
1447 static enum target_xfer_status
1448 memory_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1449 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1450 ULONGEST memaddr
, ULONGEST len
, ULONGEST
*xfered_len
)
1452 enum target_xfer_status res
;
1454 /* Zero length requests are ok and require no work. */
1456 return TARGET_XFER_EOF
;
1458 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1459 breakpoint insns, thus hiding out from higher layers whether
1460 there are software breakpoints inserted in the code stream. */
1461 if (readbuf
!= NULL
)
1463 res
= memory_xfer_partial_1 (ops
, object
, readbuf
, NULL
, memaddr
, len
,
1466 if (res
== TARGET_XFER_OK
&& !show_memory_breakpoints
)
1467 breakpoint_xfer_memory (readbuf
, NULL
, NULL
, memaddr
, res
);
1472 struct cleanup
*old_chain
;
1474 /* A large write request is likely to be partially satisfied
1475 by memory_xfer_partial_1. We will continually malloc
1476 and free a copy of the entire write request for breakpoint
1477 shadow handling even though we only end up writing a small
1478 subset of it. Cap writes to 4KB to mitigate this. */
1479 len
= min (4096, len
);
1481 buf
= xmalloc (len
);
1482 old_chain
= make_cleanup (xfree
, buf
);
1483 memcpy (buf
, writebuf
, len
);
1485 breakpoint_xfer_memory (NULL
, buf
, writebuf
, memaddr
, len
);
1486 res
= memory_xfer_partial_1 (ops
, object
, NULL
, buf
, memaddr
, len
,
1489 do_cleanups (old_chain
);
1496 restore_show_memory_breakpoints (void *arg
)
1498 show_memory_breakpoints
= (uintptr_t) arg
;
1502 make_show_memory_breakpoints_cleanup (int show
)
1504 int current
= show_memory_breakpoints
;
1506 show_memory_breakpoints
= show
;
1507 return make_cleanup (restore_show_memory_breakpoints
,
1508 (void *) (uintptr_t) current
);
1511 /* For docs see target.h, to_xfer_partial. */
1513 enum target_xfer_status
1514 target_xfer_partial (struct target_ops
*ops
,
1515 enum target_object object
, const char *annex
,
1516 gdb_byte
*readbuf
, const gdb_byte
*writebuf
,
1517 ULONGEST offset
, ULONGEST len
,
1518 ULONGEST
*xfered_len
)
1520 enum target_xfer_status retval
;
1522 gdb_assert (ops
->to_xfer_partial
!= NULL
);
1524 /* Transfer is done when LEN is zero. */
1526 return TARGET_XFER_EOF
;
1528 if (writebuf
&& !may_write_memory
)
1529 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1530 core_addr_to_string_nz (offset
), plongest (len
));
1534 /* If this is a memory transfer, let the memory-specific code
1535 have a look at it instead. Memory transfers are more
1537 if (object
== TARGET_OBJECT_MEMORY
|| object
== TARGET_OBJECT_STACK_MEMORY
1538 || object
== TARGET_OBJECT_CODE_MEMORY
)
1539 retval
= memory_xfer_partial (ops
, object
, readbuf
,
1540 writebuf
, offset
, len
, xfered_len
);
1541 else if (object
== TARGET_OBJECT_RAW_MEMORY
)
1543 /* Request the normal memory object from other layers. */
1544 retval
= raw_memory_xfer_partial (ops
, readbuf
, writebuf
, offset
, len
,
1548 retval
= ops
->to_xfer_partial (ops
, object
, annex
, readbuf
,
1549 writebuf
, offset
, len
, xfered_len
);
1553 const unsigned char *myaddr
= NULL
;
1555 fprintf_unfiltered (gdb_stdlog
,
1556 "%s:target_xfer_partial "
1557 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1560 (annex
? annex
: "(null)"),
1561 host_address_to_string (readbuf
),
1562 host_address_to_string (writebuf
),
1563 core_addr_to_string_nz (offset
),
1564 pulongest (len
), retval
,
1565 pulongest (*xfered_len
));
1571 if (retval
== TARGET_XFER_OK
&& myaddr
!= NULL
)
1575 fputs_unfiltered (", bytes =", gdb_stdlog
);
1576 for (i
= 0; i
< *xfered_len
; i
++)
1578 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
1580 if (targetdebug
< 2 && i
> 0)
1582 fprintf_unfiltered (gdb_stdlog
, " ...");
1585 fprintf_unfiltered (gdb_stdlog
, "\n");
1588 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
1592 fputc_unfiltered ('\n', gdb_stdlog
);
1595 /* Check implementations of to_xfer_partial update *XFERED_LEN
1596 properly. Do assertion after printing debug messages, so that we
1597 can find more clues on assertion failure from debugging messages. */
1598 if (retval
== TARGET_XFER_OK
|| retval
== TARGET_XFER_E_UNAVAILABLE
)
1599 gdb_assert (*xfered_len
> 0);
1604 /* Read LEN bytes of target memory at address MEMADDR, placing the
1605 results in GDB's memory at MYADDR. Returns either 0 for success or
1606 TARGET_XFER_E_IO if any error occurs.
1608 If an error occurs, no guarantee is made about the contents of the data at
1609 MYADDR. In particular, the caller should not depend upon partial reads
1610 filling the buffer with good data. There is no way for the caller to know
1611 how much good data might have been transfered anyway. Callers that can
1612 deal with partial reads should call target_read (which will retry until
1613 it makes no progress, and then return how much was transferred). */
1616 target_read_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1618 /* Dispatch to the topmost target, not the flattened current_target.
1619 Memory accesses check target->to_has_(all_)memory, and the
1620 flattened target doesn't inherit those. */
1621 if (target_read (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1622 myaddr
, memaddr
, len
) == len
)
1625 return TARGET_XFER_E_IO
;
1628 /* Like target_read_memory, but specify explicitly that this is a read
1629 from the target's raw memory. That is, this read bypasses the
1630 dcache, breakpoint shadowing, etc. */
1633 target_read_raw_memory (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1635 /* See comment in target_read_memory about why the request starts at
1636 current_target.beneath. */
1637 if (target_read (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1638 myaddr
, memaddr
, len
) == len
)
1641 return TARGET_XFER_E_IO
;
1644 /* Like target_read_memory, but specify explicitly that this is a read from
1645 the target's stack. This may trigger different cache behavior. */
1648 target_read_stack (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1650 /* See comment in target_read_memory about why the request starts at
1651 current_target.beneath. */
1652 if (target_read (current_target
.beneath
, TARGET_OBJECT_STACK_MEMORY
, NULL
,
1653 myaddr
, memaddr
, len
) == len
)
1656 return TARGET_XFER_E_IO
;
1659 /* Like target_read_memory, but specify explicitly that this is a read from
1660 the target's code. This may trigger different cache behavior. */
1663 target_read_code (CORE_ADDR memaddr
, gdb_byte
*myaddr
, ssize_t len
)
1665 /* See comment in target_read_memory about why the request starts at
1666 current_target.beneath. */
1667 if (target_read (current_target
.beneath
, TARGET_OBJECT_CODE_MEMORY
, NULL
,
1668 myaddr
, memaddr
, len
) == len
)
1671 return TARGET_XFER_E_IO
;
1674 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1675 Returns either 0 for success or TARGET_XFER_E_IO if any
1676 error occurs. If an error occurs, no guarantee is made about how
1677 much data got written. Callers that can deal with partial writes
1678 should call target_write. */
1681 target_write_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1683 /* See comment in target_read_memory about why the request starts at
1684 current_target.beneath. */
1685 if (target_write (current_target
.beneath
, TARGET_OBJECT_MEMORY
, NULL
,
1686 myaddr
, memaddr
, len
) == len
)
1689 return TARGET_XFER_E_IO
;
1692 /* Write LEN bytes from MYADDR to target raw memory at address
1693 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1694 if any error occurs. If an error occurs, no guarantee is made
1695 about how much data got written. Callers that can deal with
1696 partial writes should call target_write. */
1699 target_write_raw_memory (CORE_ADDR memaddr
, const gdb_byte
*myaddr
, ssize_t len
)
1701 /* See comment in target_read_memory about why the request starts at
1702 current_target.beneath. */
1703 if (target_write (current_target
.beneath
, TARGET_OBJECT_RAW_MEMORY
, NULL
,
1704 myaddr
, memaddr
, len
) == len
)
1707 return TARGET_XFER_E_IO
;
1710 /* Fetch the target's memory map. */
1713 target_memory_map (void)
1715 VEC(mem_region_s
) *result
;
1716 struct mem_region
*last_one
, *this_one
;
1718 struct target_ops
*t
;
1721 fprintf_unfiltered (gdb_stdlog
, "target_memory_map ()\n");
1723 result
= current_target
.to_memory_map (¤t_target
);
1727 qsort (VEC_address (mem_region_s
, result
),
1728 VEC_length (mem_region_s
, result
),
1729 sizeof (struct mem_region
), mem_region_cmp
);
1731 /* Check that regions do not overlap. Simultaneously assign
1732 a numbering for the "mem" commands to use to refer to
1735 for (ix
= 0; VEC_iterate (mem_region_s
, result
, ix
, this_one
); ix
++)
1737 this_one
->number
= ix
;
1739 if (last_one
&& last_one
->hi
> this_one
->lo
)
1741 warning (_("Overlapping regions in memory map: ignoring"));
1742 VEC_free (mem_region_s
, result
);
1745 last_one
= this_one
;
1752 target_flash_erase (ULONGEST address
, LONGEST length
)
1755 fprintf_unfiltered (gdb_stdlog
, "target_flash_erase (%s, %s)\n",
1756 hex_string (address
), phex (length
, 0));
1757 current_target
.to_flash_erase (¤t_target
, address
, length
);
1761 target_flash_done (void)
1764 fprintf_unfiltered (gdb_stdlog
, "target_flash_done\n");
1765 current_target
.to_flash_done (¤t_target
);
/* "show trust-readonly-sections" callback: report the current
   setting VALUE to FILE.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1777 /* More generic transfers. */
1779 static enum target_xfer_status
1780 default_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1781 const char *annex
, gdb_byte
*readbuf
,
1782 const gdb_byte
*writebuf
, ULONGEST offset
, ULONGEST len
,
1783 ULONGEST
*xfered_len
)
1785 if (object
== TARGET_OBJECT_MEMORY
1786 && ops
->deprecated_xfer_memory
!= NULL
)
1787 /* If available, fall back to the target's
1788 "deprecated_xfer_memory" method. */
1793 if (writebuf
!= NULL
)
1795 void *buffer
= xmalloc (len
);
1796 struct cleanup
*cleanup
= make_cleanup (xfree
, buffer
);
1798 memcpy (buffer
, writebuf
, len
);
1799 xfered
= ops
->deprecated_xfer_memory (offset
, buffer
, len
,
1800 1/*write*/, NULL
, ops
);
1801 do_cleanups (cleanup
);
1803 if (readbuf
!= NULL
)
1804 xfered
= ops
->deprecated_xfer_memory (offset
, readbuf
, len
,
1805 0/*read*/, NULL
, ops
);
1808 *xfered_len
= (ULONGEST
) xfered
;
1809 return TARGET_XFER_E_IO
;
1811 else if (xfered
== 0 && errno
== 0)
1812 /* "deprecated_xfer_memory" uses 0, cross checked against
1813 ERRNO as one indication of an error. */
1814 return TARGET_XFER_EOF
;
1816 return TARGET_XFER_E_IO
;
1820 gdb_assert (ops
->beneath
!= NULL
);
1821 return ops
->beneath
->to_xfer_partial (ops
->beneath
, object
, annex
,
1822 readbuf
, writebuf
, offset
, len
,
1827 /* Target vector read/write partial wrapper functions. */
1829 static enum target_xfer_status
1830 target_read_partial (struct target_ops
*ops
,
1831 enum target_object object
,
1832 const char *annex
, gdb_byte
*buf
,
1833 ULONGEST offset
, ULONGEST len
,
1834 ULONGEST
*xfered_len
)
1836 return target_xfer_partial (ops
, object
, annex
, buf
, NULL
, offset
, len
,
1840 static enum target_xfer_status
1841 target_write_partial (struct target_ops
*ops
,
1842 enum target_object object
,
1843 const char *annex
, const gdb_byte
*buf
,
1844 ULONGEST offset
, LONGEST len
, ULONGEST
*xfered_len
)
1846 return target_xfer_partial (ops
, object
, annex
, NULL
, buf
, offset
, len
,
1850 /* Wrappers to perform the full transfer. */
1852 /* For docs on target_read see target.h. */
1855 target_read (struct target_ops
*ops
,
1856 enum target_object object
,
1857 const char *annex
, gdb_byte
*buf
,
1858 ULONGEST offset
, LONGEST len
)
1862 while (xfered
< len
)
1864 ULONGEST xfered_len
;
1865 enum target_xfer_status status
;
1867 status
= target_read_partial (ops
, object
, annex
,
1868 (gdb_byte
*) buf
+ xfered
,
1869 offset
+ xfered
, len
- xfered
,
1872 /* Call an observer, notifying them of the xfer progress? */
1873 if (status
== TARGET_XFER_EOF
)
1875 else if (status
== TARGET_XFER_OK
)
1877 xfered
+= xfered_len
;
1887 /* Assuming that the entire [begin, end) range of memory cannot be
1888 read, try to read whatever subrange is possible to read.
1890 The function returns, in RESULT, either zero or one memory block.
1891 If there's a readable subrange at the beginning, it is completely
1892 read and returned. Any further readable subrange will not be read.
1893 Otherwise, if there's a readable subrange at the end, it will be
1894 completely read and returned. Any readable subranges before it
1895 (obviously, not starting at the beginning), will be ignored. In
1896 other cases -- either no readable subrange, or readable subrange(s)
1897 that is neither at the beginning, or end, nothing is returned.
1899 The purpose of this function is to handle a read across a boundary
1900 of accessible memory in a case when memory map is not available.
1901 The above restrictions are fine for this case, but will give
1902 incorrect results if the memory is 'patchy'. However, supporting
1903 'patchy' memory would require trying to read every single byte,
1904 and it seems unacceptable solution. Explicit memory map is
1905 recommended for this case -- and target_read_memory_robust will
1906 take care of reading multiple ranges then. */
1909 read_whatever_is_readable (struct target_ops
*ops
,
1910 ULONGEST begin
, ULONGEST end
,
1911 VEC(memory_read_result_s
) **result
)
1913 gdb_byte
*buf
= xmalloc (end
- begin
);
1914 ULONGEST current_begin
= begin
;
1915 ULONGEST current_end
= end
;
1917 memory_read_result_s r
;
1918 ULONGEST xfered_len
;
1920 /* If we previously failed to read 1 byte, nothing can be done here. */
1921 if (end
- begin
<= 1)
1927 /* Check that either first or the last byte is readable, and give up
1928 if not. This heuristic is meant to permit reading accessible memory
1929 at the boundary of accessible region. */
1930 if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1931 buf
, begin
, 1, &xfered_len
) == TARGET_XFER_OK
)
1936 else if (target_read_partial (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1937 buf
+ (end
-begin
) - 1, end
- 1, 1,
1938 &xfered_len
) == TARGET_XFER_OK
)
1949 /* Loop invariant is that the [current_begin, current_end) was previously
1950 found to be not readable as a whole.
1952 Note loop condition -- if the range has 1 byte, we can't divide the range
1953 so there's no point trying further. */
1954 while (current_end
- current_begin
> 1)
1956 ULONGEST first_half_begin
, first_half_end
;
1957 ULONGEST second_half_begin
, second_half_end
;
1959 ULONGEST middle
= current_begin
+ (current_end
- current_begin
)/2;
1963 first_half_begin
= current_begin
;
1964 first_half_end
= middle
;
1965 second_half_begin
= middle
;
1966 second_half_end
= current_end
;
1970 first_half_begin
= middle
;
1971 first_half_end
= current_end
;
1972 second_half_begin
= current_begin
;
1973 second_half_end
= middle
;
1976 xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
1977 buf
+ (first_half_begin
- begin
),
1979 first_half_end
- first_half_begin
);
1981 if (xfer
== first_half_end
- first_half_begin
)
1983 /* This half reads up fine. So, the error must be in the
1985 current_begin
= second_half_begin
;
1986 current_end
= second_half_end
;
1990 /* This half is not readable. Because we've tried one byte, we
1991 know some part of this half if actually redable. Go to the next
1992 iteration to divide again and try to read.
1994 We don't handle the other half, because this function only tries
1995 to read a single readable subrange. */
1996 current_begin
= first_half_begin
;
1997 current_end
= first_half_end
;
2003 /* The [begin, current_begin) range has been read. */
2005 r
.end
= current_begin
;
2010 /* The [current_end, end) range has been read. */
2011 LONGEST rlen
= end
- current_end
;
2013 r
.data
= xmalloc (rlen
);
2014 memcpy (r
.data
, buf
+ current_end
- begin
, rlen
);
2015 r
.begin
= current_end
;
2019 VEC_safe_push(memory_read_result_s
, (*result
), &r
);
2023 free_memory_read_result_vector (void *x
)
2025 VEC(memory_read_result_s
) *v
= x
;
2026 memory_read_result_s
*current
;
2029 for (ix
= 0; VEC_iterate (memory_read_result_s
, v
, ix
, current
); ++ix
)
2031 xfree (current
->data
);
2033 VEC_free (memory_read_result_s
, v
);
2036 VEC(memory_read_result_s
) *
2037 read_memory_robust (struct target_ops
*ops
, ULONGEST offset
, LONGEST len
)
2039 VEC(memory_read_result_s
) *result
= 0;
2042 while (xfered
< len
)
2044 struct mem_region
*region
= lookup_mem_region (offset
+ xfered
);
2047 /* If there is no explicit region, a fake one should be created. */
2048 gdb_assert (region
);
2050 if (region
->hi
== 0)
2051 rlen
= len
- xfered
;
2053 rlen
= region
->hi
- offset
;
2055 if (region
->attrib
.mode
== MEM_NONE
|| region
->attrib
.mode
== MEM_WO
)
2057 /* Cannot read this region. Note that we can end up here only
2058 if the region is explicitly marked inaccessible, or
2059 'inaccessible-by-default' is in effect. */
2064 LONGEST to_read
= min (len
- xfered
, rlen
);
2065 gdb_byte
*buffer
= (gdb_byte
*)xmalloc (to_read
);
2067 LONGEST xfer
= target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2068 (gdb_byte
*) buffer
,
2069 offset
+ xfered
, to_read
);
2070 /* Call an observer, notifying them of the xfer progress? */
2073 /* Got an error reading full chunk. See if maybe we can read
2076 read_whatever_is_readable (ops
, offset
+ xfered
,
2077 offset
+ xfered
+ to_read
, &result
);
2082 struct memory_read_result r
;
2084 r
.begin
= offset
+ xfered
;
2085 r
.end
= r
.begin
+ xfer
;
2086 VEC_safe_push (memory_read_result_s
, result
, &r
);
2096 /* An alternative to target_write with progress callbacks. */
2099 target_write_with_progress (struct target_ops
*ops
,
2100 enum target_object object
,
2101 const char *annex
, const gdb_byte
*buf
,
2102 ULONGEST offset
, LONGEST len
,
2103 void (*progress
) (ULONGEST
, void *), void *baton
)
2107 /* Give the progress callback a chance to set up. */
2109 (*progress
) (0, baton
);
2111 while (xfered
< len
)
2113 ULONGEST xfered_len
;
2114 enum target_xfer_status status
;
2116 status
= target_write_partial (ops
, object
, annex
,
2117 (gdb_byte
*) buf
+ xfered
,
2118 offset
+ xfered
, len
- xfered
,
2121 if (status
== TARGET_XFER_EOF
)
2123 if (TARGET_XFER_STATUS_ERROR_P (status
))
2126 gdb_assert (status
== TARGET_XFER_OK
);
2128 (*progress
) (xfered_len
, baton
);
2130 xfered
+= xfered_len
;
2136 /* For docs on target_write see target.h. */
2139 target_write (struct target_ops
*ops
,
2140 enum target_object object
,
2141 const char *annex
, const gdb_byte
*buf
,
2142 ULONGEST offset
, LONGEST len
)
2144 return target_write_with_progress (ops
, object
, annex
, buf
, offset
, len
,
2148 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2149 the size of the transferred data. PADDING additional bytes are
2150 available in *BUF_P. This is a helper function for
2151 target_read_alloc; see the declaration of that function for more
2155 target_read_alloc_1 (struct target_ops
*ops
, enum target_object object
,
2156 const char *annex
, gdb_byte
**buf_p
, int padding
)
2158 size_t buf_alloc
, buf_pos
;
2161 /* This function does not have a length parameter; it reads the
2162 entire OBJECT). Also, it doesn't support objects fetched partly
2163 from one target and partly from another (in a different stratum,
2164 e.g. a core file and an executable). Both reasons make it
2165 unsuitable for reading memory. */
2166 gdb_assert (object
!= TARGET_OBJECT_MEMORY
);
2168 /* Start by reading up to 4K at a time. The target will throttle
2169 this number down if necessary. */
2171 buf
= xmalloc (buf_alloc
);
2175 ULONGEST xfered_len
;
2176 enum target_xfer_status status
;
2178 status
= target_read_partial (ops
, object
, annex
, &buf
[buf_pos
],
2179 buf_pos
, buf_alloc
- buf_pos
- padding
,
2182 if (status
== TARGET_XFER_EOF
)
2184 /* Read all there was. */
2191 else if (status
!= TARGET_XFER_OK
)
2193 /* An error occurred. */
2195 return TARGET_XFER_E_IO
;
2198 buf_pos
+= xfered_len
;
2200 /* If the buffer is filling up, expand it. */
2201 if (buf_alloc
< buf_pos
* 2)
2204 buf
= xrealloc (buf
, buf_alloc
);
2211 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2212 the size of the transferred data. See the declaration in "target.h"
2213 function for more information about the return value. */
2216 target_read_alloc (struct target_ops
*ops
, enum target_object object
,
2217 const char *annex
, gdb_byte
**buf_p
)
2219 return target_read_alloc_1 (ops
, object
, annex
, buf_p
, 0);
2222 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2223 returned as a string, allocated using xmalloc. If an error occurs
2224 or the transfer is unsupported, NULL is returned. Empty objects
2225 are returned as allocated but empty strings. A warning is issued
2226 if the result contains any embedded NUL bytes. */
2229 target_read_stralloc (struct target_ops
*ops
, enum target_object object
,
2234 LONGEST i
, transferred
;
2236 transferred
= target_read_alloc_1 (ops
, object
, annex
, &buffer
, 1);
2237 bufstr
= (char *) buffer
;
2239 if (transferred
< 0)
2242 if (transferred
== 0)
2243 return xstrdup ("");
2245 bufstr
[transferred
] = 0;
2247 /* Check for embedded NUL bytes; but allow trailing NULs. */
2248 for (i
= strlen (bufstr
); i
< transferred
; i
++)
2251 warning (_("target object %d, annex %s, "
2252 "contained unexpected null characters"),
2253 (int) object
, annex
? annex
: "(none)");
2260 /* Memory transfer methods. */
2263 get_target_memory (struct target_ops
*ops
, CORE_ADDR addr
, gdb_byte
*buf
,
2266 /* This method is used to read from an alternate, non-current
2267 target. This read must bypass the overlay support (as symbols
2268 don't match this target), and GDB's internal cache (wrong cache
2269 for this target). */
2270 if (target_read (ops
, TARGET_OBJECT_RAW_MEMORY
, NULL
, buf
, addr
, len
)
2272 memory_error (TARGET_XFER_E_IO
, addr
);
2276 get_target_memory_unsigned (struct target_ops
*ops
, CORE_ADDR addr
,
2277 int len
, enum bfd_endian byte_order
)
2279 gdb_byte buf
[sizeof (ULONGEST
)];
2281 gdb_assert (len
<= sizeof (buf
));
2282 get_target_memory (ops
, addr
, buf
, len
);
2283 return extract_unsigned_integer (buf
, len
, byte_order
);
2289 target_insert_breakpoint (struct gdbarch
*gdbarch
,
2290 struct bp_target_info
*bp_tgt
)
2292 if (!may_insert_breakpoints
)
2294 warning (_("May not insert breakpoints"));
2298 return current_target
.to_insert_breakpoint (¤t_target
,
2305 target_remove_breakpoint (struct gdbarch
*gdbarch
,
2306 struct bp_target_info
*bp_tgt
)
2308 /* This is kind of a weird case to handle, but the permission might
2309 have been changed after breakpoints were inserted - in which case
2310 we should just take the user literally and assume that any
2311 breakpoints should be left in place. */
2312 if (!may_insert_breakpoints
)
2314 warning (_("May not remove breakpoints"));
2318 return current_target
.to_remove_breakpoint (¤t_target
,
2323 target_info (char *args
, int from_tty
)
2325 struct target_ops
*t
;
2326 int has_all_mem
= 0;
2328 if (symfile_objfile
!= NULL
)
2329 printf_unfiltered (_("Symbols from \"%s\".\n"),
2330 objfile_name (symfile_objfile
));
2332 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2334 if (!(*t
->to_has_memory
) (t
))
2337 if ((int) (t
->to_stratum
) <= (int) dummy_stratum
)
2340 printf_unfiltered (_("\tWhile running this, "
2341 "GDB does not access memory from...\n"));
2342 printf_unfiltered ("%s:\n", t
->to_longname
);
2343 (t
->to_files_info
) (t
);
2344 has_all_mem
= (*t
->to_has_all_memory
) (t
);
2348 /* This function is called before any new inferior is created, e.g.
2349 by running a program, attaching, or connecting to a target.
2350 It cleans up any state from previous invocations which might
2351 change between runs. This is a subset of what target_preopen
2352 resets (things which might change between targets). */
2355 target_pre_inferior (int from_tty
)
2357 /* Clear out solib state. Otherwise the solib state of the previous
2358 inferior might have survived and is entirely wrong for the new
2359 target. This has been observed on GNU/Linux using glibc 2.3. How
2371 Cannot access memory at address 0xdeadbeef
2374 /* In some OSs, the shared library list is the same/global/shared
2375 across inferiors. If code is shared between processes, so are
2376 memory regions and features. */
2377 if (!gdbarch_has_global_solist (target_gdbarch ()))
2379 no_shared_libraries (NULL
, from_tty
);
2381 invalidate_target_mem_regions ();
2383 target_clear_description ();
2386 agent_capability_invalidate ();
2389 /* Callback for iterate_over_inferiors. Gets rid of the given
2393 dispose_inferior (struct inferior
*inf
, void *args
)
2395 struct thread_info
*thread
;
2397 thread
= any_thread_of_process (inf
->pid
);
2400 switch_to_thread (thread
->ptid
);
2402 /* Core inferiors actually should be detached, not killed. */
2403 if (target_has_execution
)
2406 target_detach (NULL
, 0);
2412 /* This is to be called by the open routine before it does
2416 target_preopen (int from_tty
)
2420 if (have_inferiors ())
2423 || !have_live_inferiors ()
2424 || query (_("A program is being debugged already. Kill it? ")))
2425 iterate_over_inferiors (dispose_inferior
, NULL
);
2427 error (_("Program not killed."));
2430 /* Calling target_kill may remove the target from the stack. But if
2431 it doesn't (which seems like a win for UDI), remove it now. */
2432 /* Leave the exec target, though. The user may be switching from a
2433 live process to a core of the same program. */
2434 pop_all_targets_above (file_stratum
);
2436 target_pre_inferior (from_tty
);
2439 /* Detach a target after doing deferred register stores. */
2442 target_detach (const char *args
, int from_tty
)
2444 struct target_ops
* t
;
2446 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2447 /* Don't remove global breakpoints here. They're removed on
2448 disconnection from the target. */
2451 /* If we're in breakpoints-always-inserted mode, have to remove
2452 them before detaching. */
2453 remove_breakpoints_pid (ptid_get_pid (inferior_ptid
));
2455 prepare_for_detach ();
2457 current_target
.to_detach (¤t_target
, args
, from_tty
);
2459 fprintf_unfiltered (gdb_stdlog
, "target_detach (%s, %d)\n",
2464 target_disconnect (char *args
, int from_tty
)
2466 struct target_ops
*t
;
2468 /* If we're in breakpoints-always-inserted mode or if breakpoints
2469 are global across processes, we have to remove them before
2471 remove_breakpoints ();
2473 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2474 if (t
->to_disconnect
!= NULL
)
2477 fprintf_unfiltered (gdb_stdlog
, "target_disconnect (%s, %d)\n",
2479 t
->to_disconnect (t
, args
, from_tty
);
2487 target_wait (ptid_t ptid
, struct target_waitstatus
*status
, int options
)
2489 struct target_ops
*t
;
2490 ptid_t retval
= (current_target
.to_wait
) (¤t_target
, ptid
,
2495 char *status_string
;
2496 char *options_string
;
2498 status_string
= target_waitstatus_to_string (status
);
2499 options_string
= target_options_to_string (options
);
2500 fprintf_unfiltered (gdb_stdlog
,
2501 "target_wait (%d, status, options={%s})"
2503 ptid_get_pid (ptid
), options_string
,
2504 ptid_get_pid (retval
), status_string
);
2505 xfree (status_string
);
2506 xfree (options_string
);
2513 target_pid_to_str (ptid_t ptid
)
2515 return (*current_target
.to_pid_to_str
) (¤t_target
, ptid
);
2519 target_thread_name (struct thread_info
*info
)
2521 return current_target
.to_thread_name (¤t_target
, info
);
2525 target_resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2527 struct target_ops
*t
;
2529 target_dcache_invalidate ();
2531 current_target
.to_resume (¤t_target
, ptid
, step
, signal
);
2533 fprintf_unfiltered (gdb_stdlog
, "target_resume (%d, %s, %s)\n",
2534 ptid_get_pid (ptid
),
2535 step
? "step" : "continue",
2536 gdb_signal_to_name (signal
));
2538 registers_changed_ptid (ptid
);
2539 set_executing (ptid
, 1);
2540 set_running (ptid
, 1);
2541 clear_inline_frame_state (ptid
);
2545 target_pass_signals (int numsigs
, unsigned char *pass_signals
)
2551 fprintf_unfiltered (gdb_stdlog
, "target_pass_signals (%d, {",
2554 for (i
= 0; i
< numsigs
; i
++)
2555 if (pass_signals
[i
])
2556 fprintf_unfiltered (gdb_stdlog
, " %s",
2557 gdb_signal_to_name (i
));
2559 fprintf_unfiltered (gdb_stdlog
, " })\n");
2562 (*current_target
.to_pass_signals
) (¤t_target
, numsigs
, pass_signals
);
2566 target_program_signals (int numsigs
, unsigned char *program_signals
)
2572 fprintf_unfiltered (gdb_stdlog
, "target_program_signals (%d, {",
2575 for (i
= 0; i
< numsigs
; i
++)
2576 if (program_signals
[i
])
2577 fprintf_unfiltered (gdb_stdlog
, " %s",
2578 gdb_signal_to_name (i
));
2580 fprintf_unfiltered (gdb_stdlog
, " })\n");
2583 (*current_target
.to_program_signals
) (¤t_target
,
2584 numsigs
, program_signals
);
2588 default_follow_fork (struct target_ops
*self
, int follow_child
,
2591 /* Some target returned a fork event, but did not know how to follow it. */
2592 internal_error (__FILE__
, __LINE__
,
2593 _("could not find a target to follow fork"));
2596 /* Look through the list of possible targets for a target that can
2600 target_follow_fork (int follow_child
, int detach_fork
)
2602 int retval
= current_target
.to_follow_fork (¤t_target
,
2603 follow_child
, detach_fork
);
2606 fprintf_unfiltered (gdb_stdlog
,
2607 "target_follow_fork (%d, %d) = %d\n",
2608 follow_child
, detach_fork
, retval
);
2613 default_mourn_inferior (struct target_ops
*self
)
2615 internal_error (__FILE__
, __LINE__
,
2616 _("could not find a target to follow mourn inferior"));
2620 target_mourn_inferior (void)
2622 current_target
.to_mourn_inferior (¤t_target
);
2624 fprintf_unfiltered (gdb_stdlog
, "target_mourn_inferior ()\n");
2626 /* We no longer need to keep handles on any of the object files.
2627 Make sure to release them to avoid unnecessarily locking any
2628 of them while we're not actually debugging. */
2629 bfd_cache_close_all ();
2632 /* Look for a target which can describe architectural features, starting
2633 from TARGET. If we find one, return its description. */
2635 const struct target_desc
*
2636 target_read_description (struct target_ops
*target
)
2638 struct target_ops
*t
;
2640 for (t
= target
; t
!= NULL
; t
= t
->beneath
)
2641 if (t
->to_read_description
!= NULL
)
2643 const struct target_desc
*tdesc
;
2645 tdesc
= t
->to_read_description (t
);
2653 /* The default implementation of to_search_memory.
2654 This implements a basic search of memory, reading target memory and
2655 performing the search here (as opposed to performing the search in on the
2656 target side with, for example, gdbserver). */
2659 simple_search_memory (struct target_ops
*ops
,
2660 CORE_ADDR start_addr
, ULONGEST search_space_len
,
2661 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2662 CORE_ADDR
*found_addrp
)
2664 /* NOTE: also defined in find.c testcase. */
2665 #define SEARCH_CHUNK_SIZE 16000
2666 const unsigned chunk_size
= SEARCH_CHUNK_SIZE
;
2667 /* Buffer to hold memory contents for searching. */
2668 gdb_byte
*search_buf
;
2669 unsigned search_buf_size
;
2670 struct cleanup
*old_cleanups
;
2672 search_buf_size
= chunk_size
+ pattern_len
- 1;
2674 /* No point in trying to allocate a buffer larger than the search space. */
2675 if (search_space_len
< search_buf_size
)
2676 search_buf_size
= search_space_len
;
2678 search_buf
= malloc (search_buf_size
);
2679 if (search_buf
== NULL
)
2680 error (_("Unable to allocate memory to perform the search."));
2681 old_cleanups
= make_cleanup (free_current_contents
, &search_buf
);
2683 /* Prime the search buffer. */
2685 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2686 search_buf
, start_addr
, search_buf_size
) != search_buf_size
)
2688 warning (_("Unable to access %s bytes of target "
2689 "memory at %s, halting search."),
2690 pulongest (search_buf_size
), hex_string (start_addr
));
2691 do_cleanups (old_cleanups
);
2695 /* Perform the search.
2697 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2698 When we've scanned N bytes we copy the trailing bytes to the start and
2699 read in another N bytes. */
2701 while (search_space_len
>= pattern_len
)
2703 gdb_byte
*found_ptr
;
2704 unsigned nr_search_bytes
= min (search_space_len
, search_buf_size
);
2706 found_ptr
= memmem (search_buf
, nr_search_bytes
,
2707 pattern
, pattern_len
);
2709 if (found_ptr
!= NULL
)
2711 CORE_ADDR found_addr
= start_addr
+ (found_ptr
- search_buf
);
2713 *found_addrp
= found_addr
;
2714 do_cleanups (old_cleanups
);
2718 /* Not found in this chunk, skip to next chunk. */
2720 /* Don't let search_space_len wrap here, it's unsigned. */
2721 if (search_space_len
>= chunk_size
)
2722 search_space_len
-= chunk_size
;
2724 search_space_len
= 0;
2726 if (search_space_len
>= pattern_len
)
2728 unsigned keep_len
= search_buf_size
- chunk_size
;
2729 CORE_ADDR read_addr
= start_addr
+ chunk_size
+ keep_len
;
2732 /* Copy the trailing part of the previous iteration to the front
2733 of the buffer for the next iteration. */
2734 gdb_assert (keep_len
== pattern_len
- 1);
2735 memcpy (search_buf
, search_buf
+ chunk_size
, keep_len
);
2737 nr_to_read
= min (search_space_len
- keep_len
, chunk_size
);
2739 if (target_read (ops
, TARGET_OBJECT_MEMORY
, NULL
,
2740 search_buf
+ keep_len
, read_addr
,
2741 nr_to_read
) != nr_to_read
)
2743 warning (_("Unable to access %s bytes of target "
2744 "memory at %s, halting search."),
2745 plongest (nr_to_read
),
2746 hex_string (read_addr
));
2747 do_cleanups (old_cleanups
);
2751 start_addr
+= chunk_size
;
2757 do_cleanups (old_cleanups
);
2761 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2762 sequence of bytes in PATTERN with length PATTERN_LEN.
2764 The result is 1 if found, 0 if not found, and -1 if there was an error
2765 requiring halting of the search (e.g. memory read error).
2766 If the pattern is found the address is recorded in FOUND_ADDRP. */
2769 target_search_memory (CORE_ADDR start_addr
, ULONGEST search_space_len
,
2770 const gdb_byte
*pattern
, ULONGEST pattern_len
,
2771 CORE_ADDR
*found_addrp
)
2773 struct target_ops
*t
;
2776 /* We don't use INHERIT to set current_target.to_search_memory,
2777 so we have to scan the target stack and handle targetdebug
2781 fprintf_unfiltered (gdb_stdlog
, "target_search_memory (%s, ...)\n",
2782 hex_string (start_addr
));
2784 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
2785 if (t
->to_search_memory
!= NULL
)
2790 found
= t
->to_search_memory (t
, start_addr
, search_space_len
,
2791 pattern
, pattern_len
, found_addrp
);
2795 /* If a special version of to_search_memory isn't available, use the
2797 found
= simple_search_memory (current_target
.beneath
,
2798 start_addr
, search_space_len
,
2799 pattern
, pattern_len
, found_addrp
);
2803 fprintf_unfiltered (gdb_stdlog
, " = %d\n", found
);
2808 /* Look through the currently pushed targets. If none of them will
2809 be able to restart the currently running process, issue an error
2813 target_require_runnable (void)
2815 struct target_ops
*t
;
2817 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
2819 /* If this target knows how to create a new program, then
2820 assume we will still be able to after killing the current
2821 one. Either killing and mourning will not pop T, or else
2822 find_default_run_target will find it again. */
2823 if (t
->to_create_inferior
!= NULL
)
2826 /* Do not worry about thread_stratum targets that can not
2827 create inferiors. Assume they will be pushed again if
2828 necessary, and continue to the process_stratum. */
2829 if (t
->to_stratum
== thread_stratum
2830 || t
->to_stratum
== arch_stratum
)
2833 error (_("The \"%s\" target does not support \"run\". "
2834 "Try \"help target\" or \"continue\"."),
2838 /* This function is only called if the target is running. In that
2839 case there should have been a process_stratum target and it
2840 should either know how to create inferiors, or not... */
2841 internal_error (__FILE__
, __LINE__
, _("No targets found"));
2844 /* Look through the list of possible targets for a target that can
2845 execute a run or attach command without any other data. This is
2846 used to locate the default process stratum.
2848 If DO_MESG is not NULL, the result is always valid (error() is
2849 called for errors); else, return NULL on error. */
2851 static struct target_ops
*
2852 find_default_run_target (char *do_mesg
)
2854 struct target_ops
**t
;
2855 struct target_ops
*runable
= NULL
;
2860 for (t
= target_structs
; t
< target_structs
+ target_struct_size
;
2863 if ((*t
)->to_can_run
&& target_can_run (*t
))
2873 error (_("Don't know how to %s. Try \"help target\"."), do_mesg
);
2882 find_default_attach (struct target_ops
*ops
, char *args
, int from_tty
)
2884 struct target_ops
*t
;
2886 t
= find_default_run_target ("attach");
2887 (t
->to_attach
) (t
, args
, from_tty
);
2892 find_default_create_inferior (struct target_ops
*ops
,
2893 char *exec_file
, char *allargs
, char **env
,
2896 struct target_ops
*t
;
2898 t
= find_default_run_target ("run");
2899 (t
->to_create_inferior
) (t
, exec_file
, allargs
, env
, from_tty
);
2904 find_default_can_async_p (struct target_ops
*ignore
)
2906 struct target_ops
*t
;
2908 /* This may be called before the target is pushed on the stack;
2909 look for the default process stratum. If there's none, gdb isn't
2910 configured with a native debugger, and target remote isn't
2912 t
= find_default_run_target (NULL
);
2913 if (t
&& t
->to_can_async_p
!= delegate_can_async_p
)
2914 return (t
->to_can_async_p
) (t
);
2919 find_default_is_async_p (struct target_ops
*ignore
)
2921 struct target_ops
*t
;
2923 /* This may be called before the target is pushed on the stack;
2924 look for the default process stratum. If there's none, gdb isn't
2925 configured with a native debugger, and target remote isn't
2927 t
= find_default_run_target (NULL
);
2928 if (t
&& t
->to_is_async_p
!= delegate_is_async_p
)
2929 return (t
->to_is_async_p
) (t
);
2934 find_default_supports_non_stop (struct target_ops
*self
)
2936 struct target_ops
*t
;
2938 t
= find_default_run_target (NULL
);
2939 if (t
&& t
->to_supports_non_stop
)
2940 return (t
->to_supports_non_stop
) (t
);
2945 target_supports_non_stop (void)
2947 struct target_ops
*t
;
2949 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
2950 if (t
->to_supports_non_stop
)
2951 return t
->to_supports_non_stop (t
);
2956 /* Implement the "info proc" command. */
2959 target_info_proc (char *args
, enum info_proc_what what
)
2961 struct target_ops
*t
;
2963 /* If we're already connected to something that can get us OS
2964 related data, use it. Otherwise, try using the native
2966 if (current_target
.to_stratum
>= process_stratum
)
2967 t
= current_target
.beneath
;
2969 t
= find_default_run_target (NULL
);
2971 for (; t
!= NULL
; t
= t
->beneath
)
2973 if (t
->to_info_proc
!= NULL
)
2975 t
->to_info_proc (t
, args
, what
);
2978 fprintf_unfiltered (gdb_stdlog
,
2979 "target_info_proc (\"%s\", %d)\n", args
, what
);
2989 find_default_supports_disable_randomization (struct target_ops
*self
)
2991 struct target_ops
*t
;
2993 t
= find_default_run_target (NULL
);
2994 if (t
&& t
->to_supports_disable_randomization
)
2995 return (t
->to_supports_disable_randomization
) (t
);
3000 target_supports_disable_randomization (void)
3002 struct target_ops
*t
;
3004 for (t
= ¤t_target
; t
!= NULL
; t
= t
->beneath
)
3005 if (t
->to_supports_disable_randomization
)
3006 return t
->to_supports_disable_randomization (t
);
3012 target_get_osdata (const char *type
)
3014 struct target_ops
*t
;
3016 /* If we're already connected to something that can get us OS
3017 related data, use it. Otherwise, try using the native
3019 if (current_target
.to_stratum
>= process_stratum
)
3020 t
= current_target
.beneath
;
3022 t
= find_default_run_target ("get OS data");
3027 return target_read_stralloc (t
, TARGET_OBJECT_OSDATA
, type
);
3030 /* Determine the current address space of thread PTID. */
3032 struct address_space
*
3033 target_thread_address_space (ptid_t ptid
)
3035 struct address_space
*aspace
;
3036 struct inferior
*inf
;
3037 struct target_ops
*t
;
3039 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3041 if (t
->to_thread_address_space
!= NULL
)
3043 aspace
= t
->to_thread_address_space (t
, ptid
);
3044 gdb_assert (aspace
);
3047 fprintf_unfiltered (gdb_stdlog
,
3048 "target_thread_address_space (%s) = %d\n",
3049 target_pid_to_str (ptid
),
3050 address_space_num (aspace
));
3055 /* Fall-back to the "main" address space of the inferior. */
3056 inf
= find_inferior_pid (ptid_get_pid (ptid
));
3058 if (inf
== NULL
|| inf
->aspace
== NULL
)
3059 internal_error (__FILE__
, __LINE__
,
3060 _("Can't determine the current "
3061 "address space of thread %s\n"),
3062 target_pid_to_str (ptid
));
3068 /* Target file operations. */
3070 static struct target_ops
*
3071 default_fileio_target (void)
3073 /* If we're already connected to something that can perform
3074 file I/O, use it. Otherwise, try using the native target. */
3075 if (current_target
.to_stratum
>= process_stratum
)
3076 return current_target
.beneath
;
3078 return find_default_run_target ("file I/O");
3081 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3082 target file descriptor, or -1 if an error occurs (and set
3085 target_fileio_open (const char *filename
, int flags
, int mode
,
3088 struct target_ops
*t
;
3090 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3092 if (t
->to_fileio_open
!= NULL
)
3094 int fd
= t
->to_fileio_open (t
, filename
, flags
, mode
, target_errno
);
3097 fprintf_unfiltered (gdb_stdlog
,
3098 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3099 filename
, flags
, mode
,
3100 fd
, fd
!= -1 ? 0 : *target_errno
);
3105 *target_errno
= FILEIO_ENOSYS
;
3109 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3110 Return the number of bytes written, or -1 if an error occurs
3111 (and set *TARGET_ERRNO). */
3113 target_fileio_pwrite (int fd
, const gdb_byte
*write_buf
, int len
,
3114 ULONGEST offset
, int *target_errno
)
3116 struct target_ops
*t
;
3118 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3120 if (t
->to_fileio_pwrite
!= NULL
)
3122 int ret
= t
->to_fileio_pwrite (t
, fd
, write_buf
, len
, offset
,
3126 fprintf_unfiltered (gdb_stdlog
,
3127 "target_fileio_pwrite (%d,...,%d,%s) "
3129 fd
, len
, pulongest (offset
),
3130 ret
, ret
!= -1 ? 0 : *target_errno
);
3135 *target_errno
= FILEIO_ENOSYS
;
3139 /* Read up to LEN bytes FD on the target into READ_BUF.
3140 Return the number of bytes read, or -1 if an error occurs
3141 (and set *TARGET_ERRNO). */
3143 target_fileio_pread (int fd
, gdb_byte
*read_buf
, int len
,
3144 ULONGEST offset
, int *target_errno
)
3146 struct target_ops
*t
;
3148 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3150 if (t
->to_fileio_pread
!= NULL
)
3152 int ret
= t
->to_fileio_pread (t
, fd
, read_buf
, len
, offset
,
3156 fprintf_unfiltered (gdb_stdlog
,
3157 "target_fileio_pread (%d,...,%d,%s) "
3159 fd
, len
, pulongest (offset
),
3160 ret
, ret
!= -1 ? 0 : *target_errno
);
3165 *target_errno
= FILEIO_ENOSYS
;
3169 /* Close FD on the target. Return 0, or -1 if an error occurs
3170 (and set *TARGET_ERRNO). */
3172 target_fileio_close (int fd
, int *target_errno
)
3174 struct target_ops
*t
;
3176 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3178 if (t
->to_fileio_close
!= NULL
)
3180 int ret
= t
->to_fileio_close (t
, fd
, target_errno
);
3183 fprintf_unfiltered (gdb_stdlog
,
3184 "target_fileio_close (%d) = %d (%d)\n",
3185 fd
, ret
, ret
!= -1 ? 0 : *target_errno
);
3190 *target_errno
= FILEIO_ENOSYS
;
3194 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3195 occurs (and set *TARGET_ERRNO). */
3197 target_fileio_unlink (const char *filename
, int *target_errno
)
3199 struct target_ops
*t
;
3201 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3203 if (t
->to_fileio_unlink
!= NULL
)
3205 int ret
= t
->to_fileio_unlink (t
, filename
, target_errno
);
3208 fprintf_unfiltered (gdb_stdlog
,
3209 "target_fileio_unlink (%s) = %d (%d)\n",
3210 filename
, ret
, ret
!= -1 ? 0 : *target_errno
);
3215 *target_errno
= FILEIO_ENOSYS
;
3219 /* Read value of symbolic link FILENAME on the target. Return a
3220 null-terminated string allocated via xmalloc, or NULL if an error
3221 occurs (and set *TARGET_ERRNO). */
3223 target_fileio_readlink (const char *filename
, int *target_errno
)
3225 struct target_ops
*t
;
3227 for (t
= default_fileio_target (); t
!= NULL
; t
= t
->beneath
)
3229 if (t
->to_fileio_readlink
!= NULL
)
3231 char *ret
= t
->to_fileio_readlink (t
, filename
, target_errno
);
3234 fprintf_unfiltered (gdb_stdlog
,
3235 "target_fileio_readlink (%s) = %s (%d)\n",
3236 filename
, ret
? ret
: "(nil)",
3237 ret
? 0 : *target_errno
);
3242 *target_errno
= FILEIO_ENOSYS
;
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, discarding any error.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
3255 /* Read target file FILENAME. Store the result in *BUF_P and
3256 return the size of the transferred data. PADDING additional bytes are
3257 available in *BUF_P. This is a helper function for
3258 target_fileio_read_alloc; see the declaration of that function for more
3262 target_fileio_read_alloc_1 (const char *filename
,
3263 gdb_byte
**buf_p
, int padding
)
3265 struct cleanup
*close_cleanup
;
3266 size_t buf_alloc
, buf_pos
;
3272 fd
= target_fileio_open (filename
, FILEIO_O_RDONLY
, 0700, &target_errno
);
3276 close_cleanup
= make_cleanup (target_fileio_close_cleanup
, &fd
);
3278 /* Start by reading up to 4K at a time. The target will throttle
3279 this number down if necessary. */
3281 buf
= xmalloc (buf_alloc
);
3285 n
= target_fileio_pread (fd
, &buf
[buf_pos
],
3286 buf_alloc
- buf_pos
- padding
, buf_pos
,
3290 /* An error occurred. */
3291 do_cleanups (close_cleanup
);
3297 /* Read all there was. */
3298 do_cleanups (close_cleanup
);
3308 /* If the buffer is filling up, expand it. */
3309 if (buf_alloc
< buf_pos
* 2)
3312 buf
= xrealloc (buf
, buf_alloc
);
3319 /* Read target file FILENAME. Store the result in *BUF_P and return
3320 the size of the transferred data. See the declaration in "target.h"
3321 function for more information about the return value. */
3324 target_fileio_read_alloc (const char *filename
, gdb_byte
**buf_p
)
3326 return target_fileio_read_alloc_1 (filename
, buf_p
, 0);
3329 /* Read target file FILENAME. The result is NUL-terminated and
3330 returned as a string, allocated using xmalloc. If an error occurs
3331 or the transfer is unsupported, NULL is returned. Empty objects
3332 are returned as allocated but empty strings. A warning is issued
3333 if the result contains any embedded NUL bytes. */
3336 target_fileio_read_stralloc (const char *filename
)
3340 LONGEST i
, transferred
;
3342 transferred
= target_fileio_read_alloc_1 (filename
, &buffer
, 1);
3343 bufstr
= (char *) buffer
;
3345 if (transferred
< 0)
3348 if (transferred
== 0)
3349 return xstrdup ("");
3351 bufstr
[transferred
] = 0;
3353 /* Check for embedded NUL bytes; but allow trailing NULs. */
3354 for (i
= strlen (bufstr
); i
< transferred
; i
++)
3357 warning (_("target file %s "
3358 "contained unexpected null characters"),
3368 default_region_ok_for_hw_watchpoint (struct target_ops
*self
,
3369 CORE_ADDR addr
, int len
)
3371 return (len
<= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT
);
3375 default_watchpoint_addr_within_range (struct target_ops
*target
,
3377 CORE_ADDR start
, int length
)
3379 return addr
>= start
&& addr
< start
+ length
;
3382 static struct gdbarch
*
3383 default_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
3385 return target_gdbarch ();
3395 * Find the next target down the stack from the specified target.
3399 find_target_beneath (struct target_ops
*t
)
3407 find_target_at (enum strata stratum
)
3409 struct target_ops
*t
;
3411 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3412 if (t
->to_stratum
== stratum
)
3419 /* The inferior process has died. Long live the inferior! */
3422 generic_mourn_inferior (void)
3426 ptid
= inferior_ptid
;
3427 inferior_ptid
= null_ptid
;
3429 /* Mark breakpoints uninserted in case something tries to delete a
3430 breakpoint while we delete the inferior's threads (which would
3431 fail, since the inferior is long gone). */
3432 mark_breakpoints_out ();
3434 if (!ptid_equal (ptid
, null_ptid
))
3436 int pid
= ptid_get_pid (ptid
);
3437 exit_inferior (pid
);
3440 /* Note this wipes step-resume breakpoints, so needs to be done
3441 after exit_inferior, which ends up referencing the step-resume
3442 breakpoints through clear_thread_inferior_resources. */
3443 breakpoint_init_inferior (inf_exited
);
3445 registers_changed ();
3447 reopen_exec_file ();
3448 reinit_frame_cache ();
3450 if (deprecated_detach_hook
)
3451 deprecated_detach_hook ();
3454 /* Convert a normal process ID to a string. Returns the string in a
3458 normal_pid_to_str (ptid_t ptid
)
3460 static char buf
[32];
3462 xsnprintf (buf
, sizeof buf
, "process %d", ptid_get_pid (ptid
));
3467 default_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
3469 return normal_pid_to_str (ptid
);
3472 /* Error-catcher for target_find_memory_regions. */
3474 dummy_find_memory_regions (struct target_ops
*self
,
3475 find_memory_region_ftype ignore1
, void *ignore2
)
3477 error (_("Command not implemented for this target."));
3481 /* Error-catcher for target_make_corefile_notes. */
3483 dummy_make_corefile_notes (struct target_ops
*self
,
3484 bfd
*ignore1
, int *ignore2
)
3486 error (_("Command not implemented for this target."));
3490 /* Set up the handful of non-empty slots needed by the dummy target
3494 init_dummy_target (void)
3496 dummy_target
.to_shortname
= "None";
3497 dummy_target
.to_longname
= "None";
3498 dummy_target
.to_doc
= "";
3499 dummy_target
.to_create_inferior
= find_default_create_inferior
;
3500 dummy_target
.to_supports_non_stop
= find_default_supports_non_stop
;
3501 dummy_target
.to_supports_disable_randomization
3502 = find_default_supports_disable_randomization
;
3503 dummy_target
.to_stratum
= dummy_stratum
;
3504 dummy_target
.to_has_all_memory
= (int (*) (struct target_ops
*)) return_zero
;
3505 dummy_target
.to_has_memory
= (int (*) (struct target_ops
*)) return_zero
;
3506 dummy_target
.to_has_stack
= (int (*) (struct target_ops
*)) return_zero
;
3507 dummy_target
.to_has_registers
= (int (*) (struct target_ops
*)) return_zero
;
3508 dummy_target
.to_has_execution
3509 = (int (*) (struct target_ops
*, ptid_t
)) return_zero
;
3510 dummy_target
.to_magic
= OPS_MAGIC
;
3512 install_dummy_methods (&dummy_target
);
3516 debug_to_open (char *args
, int from_tty
)
3518 debug_target
.to_open (args
, from_tty
);
3520 fprintf_unfiltered (gdb_stdlog
, "target_open (%s, %d)\n", args
, from_tty
);
3524 target_close (struct target_ops
*targ
)
3526 gdb_assert (!target_is_pushed (targ
));
3528 if (targ
->to_xclose
!= NULL
)
3529 targ
->to_xclose (targ
);
3530 else if (targ
->to_close
!= NULL
)
3531 targ
->to_close (targ
);
3534 fprintf_unfiltered (gdb_stdlog
, "target_close ()\n");
3538 target_attach (char *args
, int from_tty
)
3540 current_target
.to_attach (¤t_target
, args
, from_tty
);
3542 fprintf_unfiltered (gdb_stdlog
, "target_attach (%s, %d)\n",
3547 target_thread_alive (ptid_t ptid
)
3551 retval
= current_target
.to_thread_alive (¤t_target
, ptid
);
3553 fprintf_unfiltered (gdb_stdlog
, "target_thread_alive (%d) = %d\n",
3554 ptid_get_pid (ptid
), retval
);
3560 target_find_new_threads (void)
3562 current_target
.to_find_new_threads (¤t_target
);
3564 fprintf_unfiltered (gdb_stdlog
, "target_find_new_threads ()\n");
3568 target_stop (ptid_t ptid
)
3572 warning (_("May not interrupt or stop the target, ignoring attempt"));
3576 (*current_target
.to_stop
) (¤t_target
, ptid
);
3580 debug_to_post_attach (struct target_ops
*self
, int pid
)
3582 debug_target
.to_post_attach (&debug_target
, pid
);
3584 fprintf_unfiltered (gdb_stdlog
, "target_post_attach (%d)\n", pid
);
3587 /* Concatenate ELEM to LIST, a comma separate list, and return the
3588 result. The LIST incoming argument is released. */
3591 str_comma_list_concat_elem (char *list
, const char *elem
)
3594 return xstrdup (elem
);
3596 return reconcat (list
, list
, ", ", elem
, (char *) NULL
);
3599 /* Helper for target_options_to_string. If OPT is present in
3600 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3601 Returns the new resulting string. OPT is removed from
3605 do_option (int *target_options
, char *ret
,
3606 int opt
, char *opt_str
)
3608 if ((*target_options
& opt
) != 0)
3610 ret
= str_comma_list_concat_elem (ret
, opt_str
);
3611 *target_options
&= ~opt
;
3618 target_options_to_string (int target_options
)
3622 #define DO_TARG_OPTION(OPT) \
3623 ret = do_option (&target_options, ret, OPT, #OPT)
3625 DO_TARG_OPTION (TARGET_WNOHANG
);
3627 if (target_options
!= 0)
3628 ret
= str_comma_list_concat_elem (ret
, "unknown???");
3636 debug_print_register (const char * func
,
3637 struct regcache
*regcache
, int regno
)
3639 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3641 fprintf_unfiltered (gdb_stdlog
, "%s ", func
);
3642 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
)
3643 && gdbarch_register_name (gdbarch
, regno
) != NULL
3644 && gdbarch_register_name (gdbarch
, regno
)[0] != '\0')
3645 fprintf_unfiltered (gdb_stdlog
, "(%s)",
3646 gdbarch_register_name (gdbarch
, regno
));
3648 fprintf_unfiltered (gdb_stdlog
, "(%d)", regno
);
3649 if (regno
>= 0 && regno
< gdbarch_num_regs (gdbarch
))
3651 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
3652 int i
, size
= register_size (gdbarch
, regno
);
3653 gdb_byte buf
[MAX_REGISTER_SIZE
];
3655 regcache_raw_collect (regcache
, regno
, buf
);
3656 fprintf_unfiltered (gdb_stdlog
, " = ");
3657 for (i
= 0; i
< size
; i
++)
3659 fprintf_unfiltered (gdb_stdlog
, "%02x", buf
[i
]);
3661 if (size
<= sizeof (LONGEST
))
3663 ULONGEST val
= extract_unsigned_integer (buf
, size
, byte_order
);
3665 fprintf_unfiltered (gdb_stdlog
, " %s %s",
3666 core_addr_to_string_nz (val
), plongest (val
));
3669 fprintf_unfiltered (gdb_stdlog
, "\n");
3673 target_fetch_registers (struct regcache
*regcache
, int regno
)
3675 current_target
.to_fetch_registers (¤t_target
, regcache
, regno
);
3677 debug_print_register ("target_fetch_registers", regcache
, regno
);
3681 target_store_registers (struct regcache
*regcache
, int regno
)
3683 struct target_ops
*t
;
3685 if (!may_write_registers
)
3686 error (_("Writing to registers is not allowed (regno %d)"), regno
);
3688 current_target
.to_store_registers (¤t_target
, regcache
, regno
);
3691 debug_print_register ("target_store_registers", regcache
, regno
);
3696 target_core_of_thread (ptid_t ptid
)
3698 int retval
= current_target
.to_core_of_thread (¤t_target
, ptid
);
3701 fprintf_unfiltered (gdb_stdlog
,
3702 "target_core_of_thread (%d) = %d\n",
3703 ptid_get_pid (ptid
), retval
);
3708 target_verify_memory (const gdb_byte
*data
, CORE_ADDR memaddr
, ULONGEST size
)
3710 int retval
= current_target
.to_verify_memory (¤t_target
,
3711 data
, memaddr
, size
);
3714 fprintf_unfiltered (gdb_stdlog
,
3715 "target_verify_memory (%s, %s) = %d\n",
3716 paddress (target_gdbarch (), memaddr
),
3722 /* The documentation for this function is in its prototype declaration in
3726 target_insert_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
3730 ret
= current_target
.to_insert_mask_watchpoint (¤t_target
,
3734 fprintf_unfiltered (gdb_stdlog
, "\
3735 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
3736 core_addr_to_string (addr
),
3737 core_addr_to_string (mask
), rw
, ret
);
3742 /* The documentation for this function is in its prototype declaration in
3746 target_remove_mask_watchpoint (CORE_ADDR addr
, CORE_ADDR mask
, int rw
)
3750 ret
= current_target
.to_remove_mask_watchpoint (¤t_target
,
3754 fprintf_unfiltered (gdb_stdlog
, "\
3755 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
3756 core_addr_to_string (addr
),
3757 core_addr_to_string (mask
), rw
, ret
);
3762 /* The documentation for this function is in its prototype declaration
3766 target_masked_watch_num_registers (CORE_ADDR addr
, CORE_ADDR mask
)
3768 return current_target
.to_masked_watch_num_registers (¤t_target
,
3772 /* The documentation for this function is in its prototype declaration
3776 target_ranged_break_num_registers (void)
3778 return current_target
.to_ranged_break_num_registers (¤t_target
);
3783 struct btrace_target_info
*
3784 target_enable_btrace (ptid_t ptid
)
3786 struct target_ops
*t
;
3788 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3789 if (t
->to_enable_btrace
!= NULL
)
3790 return t
->to_enable_btrace (t
, ptid
);
3799 target_disable_btrace (struct btrace_target_info
*btinfo
)
3801 struct target_ops
*t
;
3803 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3804 if (t
->to_disable_btrace
!= NULL
)
3806 t
->to_disable_btrace (t
, btinfo
);
3816 target_teardown_btrace (struct btrace_target_info
*btinfo
)
3818 struct target_ops
*t
;
3820 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3821 if (t
->to_teardown_btrace
!= NULL
)
3823 t
->to_teardown_btrace (t
, btinfo
);
3833 target_read_btrace (VEC (btrace_block_s
) **btrace
,
3834 struct btrace_target_info
*btinfo
,
3835 enum btrace_read_type type
)
3837 struct target_ops
*t
;
3839 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3840 if (t
->to_read_btrace
!= NULL
)
3841 return t
->to_read_btrace (t
, btrace
, btinfo
, type
);
3844 return BTRACE_ERR_NOT_SUPPORTED
;
3850 target_stop_recording (void)
3852 struct target_ops
*t
;
3854 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3855 if (t
->to_stop_recording
!= NULL
)
3857 t
->to_stop_recording (t
);
3861 /* This is optional. */
3867 target_info_record (void)
3869 struct target_ops
*t
;
3871 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3872 if (t
->to_info_record
!= NULL
)
3874 t
->to_info_record (t
);
3884 target_save_record (const char *filename
)
3886 current_target
.to_save_record (¤t_target
, filename
);
3892 target_supports_delete_record (void)
3894 struct target_ops
*t
;
3896 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
3897 if (t
->to_delete_record
!= NULL
)
3906 target_delete_record (void)
3908 current_target
.to_delete_record (¤t_target
);
3914 target_record_is_replaying (void)
3916 return current_target
.to_record_is_replaying (¤t_target
);
3922 target_goto_record_begin (void)
3924 current_target
.to_goto_record_begin (¤t_target
);
3930 target_goto_record_end (void)
3932 current_target
.to_goto_record_end (¤t_target
);
3938 target_goto_record (ULONGEST insn
)
3940 current_target
.to_goto_record (¤t_target
, insn
);
3946 target_insn_history (int size
, int flags
)
3948 current_target
.to_insn_history (¤t_target
, size
, flags
);
3954 target_insn_history_from (ULONGEST from
, int size
, int flags
)
3956 current_target
.to_insn_history_from (¤t_target
, from
, size
, flags
);
3962 target_insn_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
3964 current_target
.to_insn_history_range (¤t_target
, begin
, end
, flags
);
3970 target_call_history (int size
, int flags
)
3972 current_target
.to_call_history (¤t_target
, size
, flags
);
3978 target_call_history_from (ULONGEST begin
, int size
, int flags
)
3980 current_target
.to_call_history_from (¤t_target
, begin
, size
, flags
);
3986 target_call_history_range (ULONGEST begin
, ULONGEST end
, int flags
)
3988 current_target
.to_call_history_range (¤t_target
, begin
, end
, flags
);
3992 debug_to_prepare_to_store (struct target_ops
*self
, struct regcache
*regcache
)
3994 debug_target
.to_prepare_to_store (&debug_target
, regcache
);
3996 fprintf_unfiltered (gdb_stdlog
, "target_prepare_to_store ()\n");
4001 const struct frame_unwind
*
4002 target_get_unwinder (void)
4004 struct target_ops
*t
;
4006 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4007 if (t
->to_get_unwinder
!= NULL
)
4008 return t
->to_get_unwinder
;
4015 const struct frame_unwind
*
4016 target_get_tailcall_unwinder (void)
4018 struct target_ops
*t
;
4020 for (t
= current_target
.beneath
; t
!= NULL
; t
= t
->beneath
)
4021 if (t
->to_get_tailcall_unwinder
!= NULL
)
4022 return t
->to_get_tailcall_unwinder
;
4030 forward_target_decr_pc_after_break (struct target_ops
*ops
,
4031 struct gdbarch
*gdbarch
)
4033 for (; ops
!= NULL
; ops
= ops
->beneath
)
4034 if (ops
->to_decr_pc_after_break
!= NULL
)
4035 return ops
->to_decr_pc_after_break (ops
, gdbarch
);
4037 return gdbarch_decr_pc_after_break (gdbarch
);
4043 target_decr_pc_after_break (struct gdbarch
*gdbarch
)
4045 return forward_target_decr_pc_after_break (current_target
.beneath
, gdbarch
);
4049 deprecated_debug_xfer_memory (CORE_ADDR memaddr
, bfd_byte
*myaddr
, int len
,
4050 int write
, struct mem_attrib
*attrib
,
4051 struct target_ops
*target
)
4055 retval
= debug_target
.deprecated_xfer_memory (memaddr
, myaddr
, len
, write
,
4058 fprintf_unfiltered (gdb_stdlog
,
4059 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4060 paddress (target_gdbarch (), memaddr
), len
,
4061 write
? "write" : "read", retval
);
4067 fputs_unfiltered (", bytes =", gdb_stdlog
);
4068 for (i
= 0; i
< retval
; i
++)
4070 if ((((intptr_t) &(myaddr
[i
])) & 0xf) == 0)
4072 if (targetdebug
< 2 && i
> 0)
4074 fprintf_unfiltered (gdb_stdlog
, " ...");
4077 fprintf_unfiltered (gdb_stdlog
, "\n");
4080 fprintf_unfiltered (gdb_stdlog
, " %02x", myaddr
[i
] & 0xff);
4084 fputc_unfiltered ('\n', gdb_stdlog
);
4090 debug_to_files_info (struct target_ops
*target
)
4092 debug_target
.to_files_info (target
);
4094 fprintf_unfiltered (gdb_stdlog
, "target_files_info (xxx)\n");
4098 debug_to_insert_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4099 struct bp_target_info
*bp_tgt
)
4103 retval
= debug_target
.to_insert_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4105 fprintf_unfiltered (gdb_stdlog
,
4106 "target_insert_breakpoint (%s, xxx) = %ld\n",
4107 core_addr_to_string (bp_tgt
->placed_address
),
4108 (unsigned long) retval
);
4113 debug_to_remove_breakpoint (struct target_ops
*ops
, struct gdbarch
*gdbarch
,
4114 struct bp_target_info
*bp_tgt
)
4118 retval
= debug_target
.to_remove_breakpoint (&debug_target
, gdbarch
, bp_tgt
);
4120 fprintf_unfiltered (gdb_stdlog
,
4121 "target_remove_breakpoint (%s, xxx) = %ld\n",
4122 core_addr_to_string (bp_tgt
->placed_address
),
4123 (unsigned long) retval
);
4128 debug_to_can_use_hw_breakpoint (struct target_ops
*self
,
4129 int type
, int cnt
, int from_tty
)
4133 retval
= debug_target
.to_can_use_hw_breakpoint (&debug_target
,
4134 type
, cnt
, from_tty
);
4136 fprintf_unfiltered (gdb_stdlog
,
4137 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4138 (unsigned long) type
,
4139 (unsigned long) cnt
,
4140 (unsigned long) from_tty
,
4141 (unsigned long) retval
);
4146 debug_to_region_ok_for_hw_watchpoint (struct target_ops
*self
,
4147 CORE_ADDR addr
, int len
)
4151 retval
= debug_target
.to_region_ok_for_hw_watchpoint (&debug_target
,
4154 fprintf_unfiltered (gdb_stdlog
,
4155 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4156 core_addr_to_string (addr
), (unsigned long) len
,
4157 core_addr_to_string (retval
));
4162 debug_to_can_accel_watchpoint_condition (struct target_ops
*self
,
4163 CORE_ADDR addr
, int len
, int rw
,
4164 struct expression
*cond
)
4168 retval
= debug_target
.to_can_accel_watchpoint_condition (&debug_target
,
4172 fprintf_unfiltered (gdb_stdlog
,
4173 "target_can_accel_watchpoint_condition "
4174 "(%s, %d, %d, %s) = %ld\n",
4175 core_addr_to_string (addr
), len
, rw
,
4176 host_address_to_string (cond
), (unsigned long) retval
);
4181 debug_to_stopped_by_watchpoint (struct target_ops
*ops
)
4185 retval
= debug_target
.to_stopped_by_watchpoint (&debug_target
);
4187 fprintf_unfiltered (gdb_stdlog
,
4188 "target_stopped_by_watchpoint () = %ld\n",
4189 (unsigned long) retval
);
4194 debug_to_stopped_data_address (struct target_ops
*target
, CORE_ADDR
*addr
)
4198 retval
= debug_target
.to_stopped_data_address (target
, addr
);
4200 fprintf_unfiltered (gdb_stdlog
,
4201 "target_stopped_data_address ([%s]) = %ld\n",
4202 core_addr_to_string (*addr
),
4203 (unsigned long)retval
);
4208 debug_to_watchpoint_addr_within_range (struct target_ops
*target
,
4210 CORE_ADDR start
, int length
)
4214 retval
= debug_target
.to_watchpoint_addr_within_range (target
, addr
,
4217 fprintf_filtered (gdb_stdlog
,
4218 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4219 core_addr_to_string (addr
), core_addr_to_string (start
),
4225 debug_to_insert_hw_breakpoint (struct target_ops
*self
,
4226 struct gdbarch
*gdbarch
,
4227 struct bp_target_info
*bp_tgt
)
4231 retval
= debug_target
.to_insert_hw_breakpoint (&debug_target
,
4234 fprintf_unfiltered (gdb_stdlog
,
4235 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4236 core_addr_to_string (bp_tgt
->placed_address
),
4237 (unsigned long) retval
);
4242 debug_to_remove_hw_breakpoint (struct target_ops
*self
,
4243 struct gdbarch
*gdbarch
,
4244 struct bp_target_info
*bp_tgt
)
4248 retval
= debug_target
.to_remove_hw_breakpoint (&debug_target
,
4251 fprintf_unfiltered (gdb_stdlog
,
4252 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4253 core_addr_to_string (bp_tgt
->placed_address
),
4254 (unsigned long) retval
);
4259 debug_to_insert_watchpoint (struct target_ops
*self
,
4260 CORE_ADDR addr
, int len
, int type
,
4261 struct expression
*cond
)
4265 retval
= debug_target
.to_insert_watchpoint (&debug_target
,
4266 addr
, len
, type
, cond
);
4268 fprintf_unfiltered (gdb_stdlog
,
4269 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4270 core_addr_to_string (addr
), len
, type
,
4271 host_address_to_string (cond
), (unsigned long) retval
);
4276 debug_to_remove_watchpoint (struct target_ops
*self
,
4277 CORE_ADDR addr
, int len
, int type
,
4278 struct expression
*cond
)
4282 retval
= debug_target
.to_remove_watchpoint (&debug_target
,
4283 addr
, len
, type
, cond
);
4285 fprintf_unfiltered (gdb_stdlog
,
4286 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4287 core_addr_to_string (addr
), len
, type
,
4288 host_address_to_string (cond
), (unsigned long) retval
);
4293 debug_to_terminal_init (struct target_ops
*self
)
4295 debug_target
.to_terminal_init (&debug_target
);
4297 fprintf_unfiltered (gdb_stdlog
, "target_terminal_init ()\n");
4301 debug_to_terminal_inferior (struct target_ops
*self
)
4303 debug_target
.to_terminal_inferior (&debug_target
);
4305 fprintf_unfiltered (gdb_stdlog
, "target_terminal_inferior ()\n");
4309 debug_to_terminal_ours_for_output (struct target_ops
*self
)
4311 debug_target
.to_terminal_ours_for_output (&debug_target
);
4313 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours_for_output ()\n");
4317 debug_to_terminal_ours (struct target_ops
*self
)
4319 debug_target
.to_terminal_ours (&debug_target
);
4321 fprintf_unfiltered (gdb_stdlog
, "target_terminal_ours ()\n");
4325 debug_to_terminal_save_ours (struct target_ops
*self
)
4327 debug_target
.to_terminal_save_ours (&debug_target
);
4329 fprintf_unfiltered (gdb_stdlog
, "target_terminal_save_ours ()\n");
4333 debug_to_terminal_info (struct target_ops
*self
,
4334 const char *arg
, int from_tty
)
4336 debug_target
.to_terminal_info (&debug_target
, arg
, from_tty
);
4338 fprintf_unfiltered (gdb_stdlog
, "target_terminal_info (%s, %d)\n", arg
,
4343 debug_to_load (struct target_ops
*self
, char *args
, int from_tty
)
4345 debug_target
.to_load (&debug_target
, args
, from_tty
);
4347 fprintf_unfiltered (gdb_stdlog
, "target_load (%s, %d)\n", args
, from_tty
);
4351 debug_to_post_startup_inferior (struct target_ops
*self
, ptid_t ptid
)
4353 debug_target
.to_post_startup_inferior (&debug_target
, ptid
);
4355 fprintf_unfiltered (gdb_stdlog
, "target_post_startup_inferior (%d)\n",
4356 ptid_get_pid (ptid
));
4360 debug_to_insert_fork_catchpoint (struct target_ops
*self
, int pid
)
4364 retval
= debug_target
.to_insert_fork_catchpoint (&debug_target
, pid
);
4366 fprintf_unfiltered (gdb_stdlog
, "target_insert_fork_catchpoint (%d) = %d\n",
4373 debug_to_remove_fork_catchpoint (struct target_ops
*self
, int pid
)
4377 retval
= debug_target
.to_remove_fork_catchpoint (&debug_target
, pid
);
4379 fprintf_unfiltered (gdb_stdlog
, "target_remove_fork_catchpoint (%d) = %d\n",
4386 debug_to_insert_vfork_catchpoint (struct target_ops
*self
, int pid
)
4390 retval
= debug_target
.to_insert_vfork_catchpoint (&debug_target
, pid
);
4392 fprintf_unfiltered (gdb_stdlog
, "target_insert_vfork_catchpoint (%d) = %d\n",
4399 debug_to_remove_vfork_catchpoint (struct target_ops
*self
, int pid
)
4403 retval
= debug_target
.to_remove_vfork_catchpoint (&debug_target
, pid
);
4405 fprintf_unfiltered (gdb_stdlog
, "target_remove_vfork_catchpoint (%d) = %d\n",
4412 debug_to_insert_exec_catchpoint (struct target_ops
*self
, int pid
)
4416 retval
= debug_target
.to_insert_exec_catchpoint (&debug_target
, pid
);
4418 fprintf_unfiltered (gdb_stdlog
, "target_insert_exec_catchpoint (%d) = %d\n",
4425 debug_to_remove_exec_catchpoint (struct target_ops
*self
, int pid
)
4429 retval
= debug_target
.to_remove_exec_catchpoint (&debug_target
, pid
);
4431 fprintf_unfiltered (gdb_stdlog
, "target_remove_exec_catchpoint (%d) = %d\n",
4438 debug_to_has_exited (struct target_ops
*self
,
4439 int pid
, int wait_status
, int *exit_status
)
4443 has_exited
= debug_target
.to_has_exited (&debug_target
,
4444 pid
, wait_status
, exit_status
);
4446 fprintf_unfiltered (gdb_stdlog
, "target_has_exited (%d, %d, %d) = %d\n",
4447 pid
, wait_status
, *exit_status
, has_exited
);
4453 debug_to_can_run (struct target_ops
*self
)
4457 retval
= debug_target
.to_can_run (&debug_target
);
4459 fprintf_unfiltered (gdb_stdlog
, "target_can_run () = %d\n", retval
);
4464 static struct gdbarch
*
4465 debug_to_thread_architecture (struct target_ops
*ops
, ptid_t ptid
)
4467 struct gdbarch
*retval
;
4469 retval
= debug_target
.to_thread_architecture (ops
, ptid
);
4471 fprintf_unfiltered (gdb_stdlog
,
4472 "target_thread_architecture (%s) = %s [%s]\n",
4473 target_pid_to_str (ptid
),
4474 host_address_to_string (retval
),
4475 gdbarch_bfd_arch_info (retval
)->printable_name
);
4480 debug_to_stop (struct target_ops
*self
, ptid_t ptid
)
4482 debug_target
.to_stop (&debug_target
, ptid
);
4484 fprintf_unfiltered (gdb_stdlog
, "target_stop (%s)\n",
4485 target_pid_to_str (ptid
));
4489 debug_to_rcmd (struct target_ops
*self
, char *command
,
4490 struct ui_file
*outbuf
)
4492 debug_target
.to_rcmd (&debug_target
, command
, outbuf
);
4493 fprintf_unfiltered (gdb_stdlog
, "target_rcmd (%s, ...)\n", command
);
4497 debug_to_pid_to_exec_file (struct target_ops
*self
, int pid
)
4501 exec_file
= debug_target
.to_pid_to_exec_file (&debug_target
, pid
);
4503 fprintf_unfiltered (gdb_stdlog
, "target_pid_to_exec_file (%d) = %s\n",
4510 setup_target_debug (void)
4512 memcpy (&debug_target
, ¤t_target
, sizeof debug_target
);
4514 current_target
.to_open
= debug_to_open
;
4515 current_target
.to_post_attach
= debug_to_post_attach
;
4516 current_target
.to_prepare_to_store
= debug_to_prepare_to_store
;
4517 current_target
.deprecated_xfer_memory
= deprecated_debug_xfer_memory
;
4518 current_target
.to_files_info
= debug_to_files_info
;
4519 current_target
.to_insert_breakpoint
= debug_to_insert_breakpoint
;
4520 current_target
.to_remove_breakpoint
= debug_to_remove_breakpoint
;
4521 current_target
.to_can_use_hw_breakpoint
= debug_to_can_use_hw_breakpoint
;
4522 current_target
.to_insert_hw_breakpoint
= debug_to_insert_hw_breakpoint
;
4523 current_target
.to_remove_hw_breakpoint
= debug_to_remove_hw_breakpoint
;
4524 current_target
.to_insert_watchpoint
= debug_to_insert_watchpoint
;
4525 current_target
.to_remove_watchpoint
= debug_to_remove_watchpoint
;
4526 current_target
.to_stopped_by_watchpoint
= debug_to_stopped_by_watchpoint
;
4527 current_target
.to_stopped_data_address
= debug_to_stopped_data_address
;
4528 current_target
.to_watchpoint_addr_within_range
4529 = debug_to_watchpoint_addr_within_range
;
4530 current_target
.to_region_ok_for_hw_watchpoint
4531 = debug_to_region_ok_for_hw_watchpoint
;
4532 current_target
.to_can_accel_watchpoint_condition
4533 = debug_to_can_accel_watchpoint_condition
;
4534 current_target
.to_terminal_init
= debug_to_terminal_init
;
4535 current_target
.to_terminal_inferior
= debug_to_terminal_inferior
;
4536 current_target
.to_terminal_ours_for_output
4537 = debug_to_terminal_ours_for_output
;
4538 current_target
.to_terminal_ours
= debug_to_terminal_ours
;
4539 current_target
.to_terminal_save_ours
= debug_to_terminal_save_ours
;
4540 current_target
.to_terminal_info
= debug_to_terminal_info
;
4541 current_target
.to_load
= debug_to_load
;
4542 current_target
.to_post_startup_inferior
= debug_to_post_startup_inferior
;
4543 current_target
.to_insert_fork_catchpoint
= debug_to_insert_fork_catchpoint
;
4544 current_target
.to_remove_fork_catchpoint
= debug_to_remove_fork_catchpoint
;
4545 current_target
.to_insert_vfork_catchpoint
= debug_to_insert_vfork_catchpoint
;
4546 current_target
.to_remove_vfork_catchpoint
= debug_to_remove_vfork_catchpoint
;
4547 current_target
.to_insert_exec_catchpoint
= debug_to_insert_exec_catchpoint
;
4548 current_target
.to_remove_exec_catchpoint
= debug_to_remove_exec_catchpoint
;
4549 current_target
.to_has_exited
= debug_to_has_exited
;
4550 current_target
.to_can_run
= debug_to_can_run
;
4551 current_target
.to_stop
= debug_to_stop
;
4552 current_target
.to_rcmd
= debug_to_rcmd
;
4553 current_target
.to_pid_to_exec_file
= debug_to_pid_to_exec_file
;
4554 current_target
.to_thread_architecture
= debug_to_thread_architecture
;
/* Help text shared by the "info target" and "info files" commands.  */

static char targ_desc[] =
"Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
4564 default_rcmd (struct target_ops
*self
, char *command
, struct ui_file
*output
)
4566 error (_("\"monitor\" command not supported by this target."));
4570 do_monitor_command (char *cmd
,
4573 target_rcmd (cmd
, gdb_stdtarg
);
4576 /* Print the name of each layers of our target stack. */
4579 maintenance_print_target_stack (char *cmd
, int from_tty
)
4581 struct target_ops
*t
;
4583 printf_filtered (_("The current target stack is:\n"));
4585 for (t
= target_stack
; t
!= NULL
; t
= t
->beneath
)
4587 printf_filtered (" - %s (%s)\n", t
->to_shortname
, t
->to_longname
);
/* Controls if async mode is permitted.  */
int target_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  */
static int target_async_permitted_1 = 0;

/* Implement "set target-async".  The mode cannot change while the
   inferior is running.  */

static void
set_target_async_command (char *args, int from_tty,
                          struct cmd_list_element *c)
{
  if (have_live_inferiors ())
    {
      /* Undo the user-visible change so "show" keeps reflecting the
         value actually in effect.  */
      target_async_permitted_1 = target_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  target_async_permitted = target_async_permitted_1;
}

/* Implement "show target-async".  */

static void
show_target_async_command (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c,
                           const char *value)
{
  fprintf_filtered (file,
                    _("Controlling the inferior in "
                      "asynchronous mode is %s.\n"), value);
}
4621 /* Temporary copies of permission settings. */
4623 static int may_write_registers_1
= 1;
4624 static int may_write_memory_1
= 1;
4625 static int may_insert_breakpoints_1
= 1;
4626 static int may_insert_tracepoints_1
= 1;
4627 static int may_insert_fast_tracepoints_1
= 1;
4628 static int may_stop_1
= 1;
4630 /* Make the user-set values match the real values again. */
4633 update_target_permissions (void)
4635 may_write_registers_1
= may_write_registers
;
4636 may_write_memory_1
= may_write_memory
;
4637 may_insert_breakpoints_1
= may_insert_breakpoints
;
4638 may_insert_tracepoints_1
= may_insert_tracepoints
;
4639 may_insert_fast_tracepoints_1
= may_insert_fast_tracepoints
;
4640 may_stop_1
= may_stop
;
4643 /* The one function handles (most of) the permission flags in the same
4647 set_target_permissions (char *args
, int from_tty
,
4648 struct cmd_list_element
*c
)
4650 if (target_has_execution
)
4652 update_target_permissions ();
4653 error (_("Cannot change this setting while the inferior is running."));
4656 /* Make the real values match the user-changed values. */
4657 may_write_registers
= may_write_registers_1
;
4658 may_insert_breakpoints
= may_insert_breakpoints_1
;
4659 may_insert_tracepoints
= may_insert_tracepoints_1
;
4660 may_insert_fast_tracepoints
= may_insert_fast_tracepoints_1
;
4661 may_stop
= may_stop_1
;
4662 update_observer_mode ();
4665 /* Set memory write permission independently of observer mode. */
4668 set_write_memory_permission (char *args
, int from_tty
,
4669 struct cmd_list_element
*c
)
4671 /* Make the real values match the user-changed values. */
4672 may_write_memory
= may_write_memory_1
;
4673 update_observer_mode ();
4678 initialize_targets (void)
4680 init_dummy_target ();
4681 push_target (&dummy_target
);
4683 add_info ("target", target_info
, targ_desc
);
4684 add_info ("files", target_info
, targ_desc
);
4686 add_setshow_zuinteger_cmd ("target", class_maintenance
, &targetdebug
, _("\
4687 Set target debugging."), _("\
4688 Show target debugging."), _("\
4689 When non-zero, target debugging is enabled. Higher numbers are more\n\
4690 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
4694 &setdebuglist
, &showdebuglist
);
4696 add_setshow_boolean_cmd ("trust-readonly-sections", class_support
,
4697 &trust_readonly
, _("\
4698 Set mode for reading from readonly sections."), _("\
4699 Show mode for reading from readonly sections."), _("\
4700 When this mode is on, memory reads from readonly sections (such as .text)\n\
4701 will be read from the object file instead of from the target. This will\n\
4702 result in significant performance improvement for remote targets."),
4704 show_trust_readonly
,
4705 &setlist
, &showlist
);
4707 add_com ("monitor", class_obscure
, do_monitor_command
,
4708 _("Send a command to the remote monitor (remote targets only)."));
4710 add_cmd ("target-stack", class_maintenance
, maintenance_print_target_stack
,
4711 _("Print the name of each layer of the internal target stack."),
4712 &maintenanceprintlist
);
4714 add_setshow_boolean_cmd ("target-async", no_class
,
4715 &target_async_permitted_1
, _("\
4716 Set whether gdb controls the inferior in asynchronous mode."), _("\
4717 Show whether gdb controls the inferior in asynchronous mode."), _("\
4718 Tells gdb whether to control the inferior in asynchronous mode."),
4719 set_target_async_command
,
4720 show_target_async_command
,
4724 add_setshow_boolean_cmd ("may-write-registers", class_support
,
4725 &may_write_registers_1
, _("\
4726 Set permission to write into registers."), _("\
4727 Show permission to write into registers."), _("\
4728 When this permission is on, GDB may write into the target's registers.\n\
4729 Otherwise, any sort of write attempt will result in an error."),
4730 set_target_permissions
, NULL
,
4731 &setlist
, &showlist
);
4733 add_setshow_boolean_cmd ("may-write-memory", class_support
,
4734 &may_write_memory_1
, _("\
4735 Set permission to write into target memory."), _("\
4736 Show permission to write into target memory."), _("\
4737 When this permission is on, GDB may write into the target's memory.\n\
4738 Otherwise, any sort of write attempt will result in an error."),
4739 set_write_memory_permission
, NULL
,
4740 &setlist
, &showlist
);
4742 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support
,
4743 &may_insert_breakpoints_1
, _("\
4744 Set permission to insert breakpoints in the target."), _("\
4745 Show permission to insert breakpoints in the target."), _("\
4746 When this permission is on, GDB may insert breakpoints in the program.\n\
4747 Otherwise, any sort of insertion attempt will result in an error."),
4748 set_target_permissions
, NULL
,
4749 &setlist
, &showlist
);
4751 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support
,
4752 &may_insert_tracepoints_1
, _("\
4753 Set permission to insert tracepoints in the target."), _("\
4754 Show permission to insert tracepoints in the target."), _("\
4755 When this permission is on, GDB may insert tracepoints in the program.\n\
4756 Otherwise, any sort of insertion attempt will result in an error."),
4757 set_target_permissions
, NULL
,
4758 &setlist
, &showlist
);
4760 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support
,
4761 &may_insert_fast_tracepoints_1
, _("\
4762 Set permission to insert fast tracepoints in the target."), _("\
4763 Show permission to insert fast tracepoints in the target."), _("\
4764 When this permission is on, GDB may insert fast tracepoints.\n\
4765 Otherwise, any sort of insertion attempt will result in an error."),
4766 set_target_permissions
, NULL
,
4767 &setlist
, &showlist
);
4769 add_setshow_boolean_cmd ("may-interrupt", class_support
,
4771 Set permission to interrupt or signal the target."), _("\
4772 Show permission to interrupt or signal the target."), _("\
4773 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4774 Otherwise, any attempt to interrupt or stop will be ignored."),
4775 set_target_permissions
, NULL
,
4776 &setlist
, &showlist
);